use Time::HiRes qw/time/;
use File::Pid;
use POSIX qw/strftime/;
use BackupPC::SearchLib;
use Cwd qw/abs_path/;

use constant BPC_FTYPE_DIR => 5;
use constant EST_CHUNK => 100000;

# offset of one hour, used to compensate for daylight saving time changes
my $dst_offset = 60 * 60;

my $debug = 0;
$|=1;

my $start_t = time();
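
# build a /tmp pidfile name from this script's absolute path so that only
# one copy of this importer runs at a time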
my $pid_path = abs_path($0);
$pid_path =~ s/\W+/_/g;

my $pidfile = new File::Pid({
	file => "/tmp/$pid_path",
});

if (my $pid = $pidfile->running ) {
	die "$0 already running: $pid\n";
} elsif ($pidfile->pid ne $$) {
	$pidfile->remove;
	$pidfile = new File::Pid;
}
print STDERR "$0 using pid ",$pidfile->pid," file ",$pidfile->file,"\n";
$pidfile->write;

my $t_fmt = '%Y-%m-%d %H:%M:%S';

my $dsn = $Conf{SearchDSN} || die "Need SearchDSN in config.pl\n";
my $user = $Conf{SearchUser} || '';

my $index_node_url = $Conf{HyperEstraierIndex};

my $dbh = DBI->connect($dsn, $user, "", { RaiseError => 1, AutoCommit => 0 });

my %opt;

if ( !getopts("cdm:v:ijfq", \%opt ) ) {
	print STDERR <<EOF;
usage: $0 [-c|-d] [-m num] [-v level] [-i|-j|-f] [-q]

Options:
	-c	create database on first use
	-d	delete database before import
	-m num	import just num increments for one host
	-v num	set verbosity (debug) level (default $debug)
	-i	update Hyper Estraier full text index
	-j	update full text index without checking for existing files
	-f	don't do anything with full text index
	-q	be quiet for hosts without changes

Option -j is a variation on -i which allows faster initial creation
of the full-text index from an existing database.

Option -f will create a database which is out of sync with the full
text index. You will have to re-run $0 with -i to fix it.

EOF
	exit 1;
}

if ($opt{v}) {
	print "Debug level at $opt{v}\n";
	$debug = $opt{v};
} elsif ($opt{f}) {
	print "WARNING: disabling full-text index update. You need to re-run $0 -j !\n";
	$index_node_url = undef;
}

#---- subs ----

sub curr_time {
	return strftime($t_fmt,localtime());
}

my $hest_node;
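
# add files that are missing from the Hyper Estraier full-text index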
sub hest_update {

	my ($host_id, $share_id, $num) = @_;

	my $skip_check = $opt{j} && print STDERR "Skipping check for existing files -- this should be used only with initial import\n";

	unless (defined($index_node_url)) {
		print STDERR "HyperEstraier support not enabled in configuration\n";
		$index_node_url = 0;
		return;
	}

	print curr_time," updating Hyper Estraier:";

	my $t = time();

	my $offset = 0;
	my $added = 0;

	print " opening index $index_node_url";
	if ($index_node_url) {
		$hest_node ||= Search::Estraier::Node->new(
			url => $index_node_url,
			user => 'admin',
			passwd => 'admin',
			croak_on_error => 1,
		);
		print " via node URL";
	} else {
		die "don't know how to use Hyper Estraier Index $index_node_url";
	}

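	# fetch and index candidate files in chunks of EST_CHUNK rows so the
	# whole result set never has to be held in memory at once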
	my $results = 0;

	do {

		my $where = '';
		my @data;
		if (defined($host_id) && defined($share_id) && defined($num)) {
			$where = qq{
			WHERE
				hosts.id = ? AND

		if ($results == 0) {
			print " - no new files\n";
			return;
		} else {
			print " - $results files: ";
		}

	sub fmt_date {

			my $fid = $row->{'fid'} || die "no fid?";
			my $uri = 'file:///' . $fid;

			unless ($skip_check) {
				my $id = $hest_node->uri_to_id($uri);
				next if ($id && $id == -1);
			}

			# create a document object
			my $doc = Search::Estraier::Document->new;

			# add attributes to the document object
			$doc->add_attr('@uri', $uri);

			foreach my $c (@{ $sth->{NAME} }) {
				print STDERR "attr $c = $row->{$c}\n" if ($debug > 2);
				$doc->add_attr($c, $row->{$c}) if (defined($row->{$c}));
			}

			#$doc->add_attr('@cdate', fmt_date($row->{'date'}));

			print STDERR $doc->dump_draft,"\n" if ($debug > 1);

			# register the document object to the database
			if ($hest_node) {
				$hest_node->put_doc($doc);
			} else {
				die "not supported";
			}
			$added++;
		}

		print " $added";

		$offset += EST_CHUNK;

	} while ($results == EST_CHUNK);

	my $dur = (time() - $t) || 1;
	printf(" [%.2f/s dur: %s]\n",
		( $added / $dur ),

## update index ##
if ( ( $opt{i} || $opt{j} ) && !$opt{c} ) {
	# update all
	print "force update of Hyper Estraier index ";
	print "by -i flag" if ($opt{i});
	print "by -j flag" if ($opt{j});
	print "\n";
	hest_update();
}
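
# with -c, create the database schema from scratch before importing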
if ($opt{c}) {
	sub do_index {
		my $index = shift || return;
		my ($table,$col,$unique) = split(/:/, $index);
		$unique ||= '';
		$index =~ s/\W+/_/g;
		print "$index on $table($col)" . ( $unique ? "u" : "" ) . " ";
		$dbh->do(qq{ create $unique index $index on $table($col) });
	}

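	# AutoCommit is off, so the whole schema below is created inside one
	# transaction and committed at the end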
	print "creating tables...\n";

	$dbh->do( qq{
		create table hosts (
			ID	SERIAL		PRIMARY KEY,
			name	VARCHAR(30)	NOT NULL,
			IP	VARCHAR(15)
		);

		create table shares (
			ID	SERIAL		PRIMARY KEY,
			hostID	INTEGER		NOT NULL references hosts(id),
			name	VARCHAR(30)	NOT NULL,
			share	VARCHAR(200)	NOT NULL
		);

		create table dvds (
			ID	SERIAL		PRIMARY KEY,
			num	INTEGER		NOT NULL,
			name	VARCHAR(255)	NOT NULL,
			mjesto	VARCHAR(255)
		);

		create table backups (
			id	serial,
			hostID	INTEGER		NOT NULL references hosts(id),
			num	INTEGER		NOT NULL,
			date	integer		NOT NULL,
			type	CHAR(4)		not null,
			shareID	integer		not null references shares(id),
			size	bigint		not null,
			inc_size bigint		not null default -1,
			inc_deleted boolean	default false,
			parts	integer		not null default 1,
			PRIMARY KEY(id)
		);

		create table files (
			ID	SERIAL,
			shareID	INTEGER		NOT NULL references shares(id),
			backupNum INTEGER	NOT NULL,
			name	VARCHAR(255)	NOT NULL,
			path	VARCHAR(255)	NOT NULL,
			date	integer		NOT NULL,
			type	INTEGER		NOT NULL,
			size	bigint		NOT NULL,
			primary key(id)
		);

		create table archive (
			id		serial,
			dvd_nr		int not null,
			total_size	bigint default -1,
			note		text,
			username	varchar(20) not null,
			date		timestamp default now(),
			primary key(id)
		);

		create table archive_backup (
			archive_id	int not null references archive(id) on delete cascade,
			backup_id	int not null references backups(id),
			primary key(archive_id, backup_id)
		);

		create table archive_burned (
			archive_id	int references archive(id),
			date		timestamp default now(),
			part		int not null default 1,
			copy		int not null default 1,
			iso_size	bigint default -1
		);

		create table backup_parts (
			id serial,
			backup_id int references backups(id),
			part_nr int not null check (part_nr > 0),
			tar_size bigint not null check (tar_size > 0),
			size bigint not null check (size > 0),
			md5 text not null,
			items int not null check (items > 0),
			date timestamp default now(),
			primary key(id)
		);
	});
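
	# index specs are table:column[,column]; a third colon-separated field,
	# if present, is passed through by do_index as the index type (e.g. unique)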
	print "creating indexes: ";

	foreach my $index (qw(
		hosts:name
		backups:hostID
		backups:num
		backups:shareID
		shares:hostID
		shares:name
		files:shareID
		files:path
		files:name
		files:date
		files:size
		archive:dvd_nr
		archive_burned:archive_id
		backup_parts:backup_id,part_nr
	)) {
		do_index($index);
	}

	print " creating sequence: ";
	foreach my $seq (qw/dvd_nr/) {
		print "$seq ";
		$dbh->do( qq{ CREATE SEQUENCE $seq } );
	}

	print "...\n";

	$dbh->commit;
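
# insert a backups row with a placeholder size of -1; the real size is
# written by update_backups_size after recurseDir has walked the share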
$sth->{insert_backups} = $dbh->prepare(qq{
	INSERT INTO backups (hostID, num, date, type, shareid, size)
	VALUES (?,?,?,?,?,-1)
});

$sth->{update_backups_size} = $dbh->prepare(qq{
	UPDATE backups SET size = ?
	WHERE hostID = ? and num = ? and date = ? and type = ? and shareid = ?
});

$sth->{insert_files} = $dbh->prepare(qq{
	INSERT INTO files
		(shareID, backupNum, name, path, date, type, size)
	VALUES (?,?,?,?,?,?,?)
});

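# walk all configured hosts and import any backups not yet in the database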
my @hosts = keys %{$hosts};
my $host_nr = 0;

foreach my $host_key (@hosts) {

	my $hostname = $hosts->{$host_key}->{'host'} || die "can't find host for $host_key";

		$hostID = $dbh->last_insert_id(undef,undef,'hosts',undef);
	}

	$host_nr++;

	# get backups for a host
	my @backups = $bpc->BackupInfoRead($hostname);
	my $incs = scalar @backups;

	my $host_header = sprintf("host %s [%d/%d]: %d increments\n",
		$hosts->{$host_key}->{'host'},
		$host_nr,
		($#hosts + 1),
		$incs
	);
	print $host_header unless ($opt{q});

	my $inc_nr = 0;
	$beenThere = {};

		my $backupNum = $backup->{'num'};
		my @backupShares = ();

		my $share_header = sprintf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n",
			$hosts->{$host_key}->{'host'},
			$inc_nr, $incs, $backupNum,
			$backup->{type} || '?',
			strftime($t_fmt,localtime($backup->{startTime})),
			fmt_time($backup->{endTime} - $backup->{startTime})
		);
		print $share_header unless ($opt{q});

		my $files = BackupPC::View->new($bpc, $hostname, \@backups, 1);
		foreach my $share ($files->shareList($backupNum)) {
			# skip if already in database!
			next if ($count > 0);

			# dump host and share header for -q
			if ($opt{q}) {
				if ($host_header) {
					print $host_header;
					$host_header = undef;
				}
				print $share_header;
			}

			# dump some log
			print curr_time," ", $share;

			$sth->{insert_backups}->execute(
				$hostID,
				$backupNum,
				$backup->{'endTime'},
				substr($backup->{'type'},0,4),
				$shareID,
			);

			my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);

			eval {
				$sth->{update_backups_size}->execute(
					$size,
					$hostID,
					$backupNum,
					$backup->{'endTime'},
					substr($backup->{'type'},0,4),
					$shareID,
				);
				print " commit";
				$dbh->commit();
			};
			if ($@) {
				print " rollback";
				$dbh->rollback();
			}

			my $dur = (time() - $t) || 1;
			printf(" %d/%d files %d/%d dirs %0.2f MB [%.2f/s dur: %s]\n",

	$sth->{insert_share} ||= $dbh->prepare(qq{
		INSERT INTO shares
			(hostID,name,share)
		VALUES (?,?,?)
	});

	my $drop_down = $hostname . '/' . $share;
	$drop_down =~ s#//+#/#g;

	$sth->{insert_share}->execute($hostID,$share, $drop_down);
	return $dbh->last_insert_id(undef,undef,'shares',undef);
}

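	# a file counts as already present when share, path and size match and
	# its date is either exact or exactly one DST hour earlier or later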
		SELECT 1 FROM files
		WHERE shareID = ? and
			path = ? and
			size = ? and
			( date = ? or date = ? or date = ? )
		LIMIT 1
	});

	my @param = ($shareID,$path,$size,$date, $date-$dst_offset, $date+$dst_offset);
	$sth->{file_in_db}->execute(@param);
	my $rows = $sth->{file_in_db}->rows;
	print STDERR "## found_in_db($shareID,$path,$date,$size) ",( $rows ? '+' : '-' ), join(" ",@param), "\n" if ($debug >= 3);

				$filesInBackup->{$path_key}->{'size'}
			));

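			# mtime can shift by exactly one hour across a daylight saving
			# change, so also remember keys one hour before and after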
			my $key_dst_prev = join(" ", (
				$shareID,
				$dir,
				$path_key,
				$filesInBackup->{$path_key}->{'mtime'} - $dst_offset,
				$filesInBackup->{$path_key}->{'size'}
			));

			my $key_dst_next = join(" ", (
				$shareID,
				$dir,
				$path_key,
				$filesInBackup->{$path_key}->{'mtime'} + $dst_offset,
				$filesInBackup->{$path_key}->{'size'}
			));

			my $found;
			if (
				! defined($beenThere->{$key}) &&
				! defined($beenThere->{$key_dst_prev}) &&
				! defined($beenThere->{$key_dst_next}) &&
				! ($found = found_in_db($key, @data))
			) {
				print STDERR "# key: $key [", $beenThere->{$key},"]" if ($debug >= 2);

				if ($filesInBackup->{$path_key}->{'type'} == BPC_FTYPE_DIR) {