15 |
use Cwd qw/abs_path/; |
use Cwd qw/abs_path/; |
16 |
|
|
17 |
use constant BPC_FTYPE_DIR => 5; |
use constant BPC_FTYPE_DIR => 5; |
18 |
use constant EST_CHUNK => 100000; |
use constant EST_CHUNK => 4096; |
19 |
|
|
20 |
# one-hour offset used to compensate for daylight-saving-time changes |
# one-hour offset used to compensate for daylight-saving-time changes |
21 |
my $dst_offset = 60 * 60; |
my $dst_offset = 60 * 60; |
58 |
|
|
59 |
my %opt; |
my %opt; |
60 |
|
|
61 |
if ( !getopts("cdm:v:ijf", \%opt ) ) { |
if ( !getopts("cdm:v:ijfq", \%opt ) ) { |
62 |
print STDERR <<EOF; |
print STDERR <<EOF; |
63 |
usage: $0 [-c|-d] [-m num] [-v|-v level] [-i|-j|-f] |
usage: $0 [-c|-d] [-m num] [-v|-v level] [-i|-j|-f] |
64 |
|
|
70 |
-i update Hyper Estraier full text index |
-i update Hyper Estraier full text index |
71 |
-j update full text, don't check existing files |
-j update full text, don't check existing files |
72 |
-f don't do anything with full text index |
-f don't do anything with full text index |
73 |
|
-q be quiet for hosts without changes |
74 |
|
|
75 |
Option -j is a variation of -i. It allows faster initial creation |
Option -j is a variation of -i. It allows faster initial creation |
76 |
of the full-text index from an existing database. |
of the full-text index from an existing database. |
105 |
return strftime($t_fmt,localtime()); |
return strftime($t_fmt,localtime()); |
106 |
} |
} |
107 |
|
|
|
my $hest_db; |
|
108 |
my $hest_node; |
my $hest_node; |
109 |
|
|
|
# Signal handler installed for SIGINT/SIGQUIT: if a Hyper Estraier
# database handle is open, flush and close it before terminating so the
# full-text index is not left in an inconsistent state.
sub signal {
    my ($caught) = @_;

    unless ($hest_db) {
        # nothing to clean up -- just terminate
        exit(0);
    }

    print "\nCaught a SIG$caught--syncing database and shutting down\n";
    $hest_db->sync();
    $hest_db->close();

    exit(0);
}
|
|
|
|
|
# Register the cleanup handler so an interactive interrupt (Ctrl-C) or
# SIGQUIT closes the Hyper Estraier index cleanly instead of dying mid-write.
$SIG{'INT'} = \&signal;


$SIG{'QUIT'} = \&signal;
|
|
|
|
110 |
sub hest_update { |
sub hest_update { |
111 |
|
|
112 |
my ($host_id, $share_id, $num) = @_; |
my ($host_id, $share_id, $num) = @_; |
126 |
my $offset = 0; |
my $offset = 0; |
127 |
my $added = 0; |
my $added = 0; |
128 |
|
|
|
print " opening index $index_node_url"; |
|
129 |
if ($index_node_url) { |
if ($index_node_url) { |
130 |
$hest_node ||= Search::Estraier::Node->new($index_node_url); |
print " opening index $index_node_url"; |
131 |
$hest_node->set_auth('admin', 'admin'); |
$hest_node ||= Search::Estraier::Node->new( |
132 |
|
url => $index_node_url, |
133 |
|
user => 'admin', |
134 |
|
passwd => 'admin', |
135 |
|
croak_on_error => 1, |
136 |
|
); |
137 |
print " via node URL"; |
print " via node URL"; |
|
} else { |
|
|
die "don't know how to use Hyper Estraier Index $index_node_url"; |
|
138 |
} |
} |
139 |
|
|
140 |
my $results = 0; |
my $results = 0; |
182 |
|
|
183 |
if ($results == 0) { |
if ($results == 0) { |
184 |
print " - no new files\n"; |
print " - no new files\n"; |
185 |
last; |
return; |
186 |
} else { |
} else { |
187 |
print " - $results files: "; |
print "..."; |
188 |
} |
} |
189 |
|
|
190 |
sub fmt_date { |
sub fmt_date { |
196 |
|
|
197 |
while (my $row = $sth->fetchrow_hashref()) { |
while (my $row = $sth->fetchrow_hashref()) { |
198 |
|
|
199 |
my $fid = $row->{'fid'} || die "no fid?"; |
my $uri = $row->{hname} . ':' . $row->{sname} . '#' . $row->{backupnum} . ' ' . $row->{filepath}; |
200 |
my $uri = 'file:///' . $fid; |
unless ($skip_check && $hest_node) { |
201 |
|
my $id = $hest_node->uri_to_id($uri); |
202 |
unless ($skip_check) { |
next if ($id && $id == -1); |
|
my $id = ($hest_db || $hest_node)->uri_to_id($uri); |
|
|
next unless ($id == -1); |
|
203 |
} |
} |
204 |
|
|
205 |
# create a document object |
# create a document object |
209 |
$doc->add_attr('@uri', $uri); |
$doc->add_attr('@uri', $uri); |
210 |
|
|
211 |
foreach my $c (@{ $sth->{NAME} }) { |
foreach my $c (@{ $sth->{NAME} }) { |
212 |
|
print STDERR "attr $c = $row->{$c}\n" if ($debug > 2); |
213 |
$doc->add_attr($c, $row->{$c}) if (defined($row->{$c})); |
$doc->add_attr($c, $row->{$c}) if (defined($row->{$c})); |
214 |
} |
} |
215 |
|
|
232 |
$added++; |
$added++; |
233 |
} |
} |
234 |
|
|
235 |
print " $added"; |
print "$added"; |
236 |
|
|
237 |
$offset += EST_CHUNK; |
$offset += EST_CHUNK; |
238 |
|
|
302 |
size bigint not null, |
size bigint not null, |
303 |
inc_size bigint not null default -1, |
inc_size bigint not null default -1, |
304 |
inc_deleted boolean default false, |
inc_deleted boolean default false, |
305 |
parts integer not null default 1, |
parts integer not null default 0, |
306 |
PRIMARY KEY(id) |
PRIMARY KEY(id) |
307 |
); |
); |
308 |
|
|
382 |
$dbh->do( qq{ CREATE SEQUENCE $seq } ); |
$dbh->do( qq{ CREATE SEQUENCE $seq } ); |
383 |
} |
} |
384 |
|
|
385 |
|
print " creating triggers "; |
386 |
|
$dbh->do( qq{ |
387 |
|
create or replace function backup_parts_check() returns trigger as ' |
388 |
|
declare |
389 |
|
b_parts integer; |
390 |
|
b_counted integer; |
391 |
|
b_id integer; |
392 |
|
begin |
393 |
|
if (TG_OP=''UPDATE'') then |
394 |
|
b_id := old.id; |
395 |
|
b_parts := old.parts; |
396 |
|
elsif (TG_OP = ''INSERT'') then |
397 |
|
b_id := new.id; |
398 |
|
b_parts := new.parts; |
399 |
|
end if; |
400 |
|
b_counted := (select count(*) from backup_parts where backup_id = b_id); |
401 |
|
if ( b_parts != b_counted ) then |
402 |
|
raise exception ''Update of backup % aborted, requested % parts and there are really % parts'', b_id, b_parts, b_counted; |
403 |
|
end if; |
404 |
|
return null; |
405 |
|
end; |
406 |
|
' language plpgsql; |
407 |
|
|
408 |
|
create trigger do_backup_parts_check |
409 |
|
after insert or update or delete on backups |
410 |
|
for each row execute procedure backup_parts_check(); |
411 |
|
}); |
412 |
|
|
413 |
print "...\n"; |
print "...\n"; |
414 |
|
|
486 |
} |
} |
487 |
|
|
488 |
$host_nr++; |
$host_nr++; |
|
print "host ", $hosts->{$host_key}->{'host'}, " [", |
|
|
$host_nr, "/", ($#hosts + 1), "]: "; |
|
|
|
|
489 |
# get backups for a host |
# get backups for a host |
490 |
my @backups = $bpc->BackupInfoRead($hostname); |
my @backups = $bpc->BackupInfoRead($hostname); |
491 |
my $incs = scalar @backups; |
my $incs = scalar @backups; |
|
print "$incs increments\n"; |
|
492 |
|
|
493 |
|
my $host_header = sprintf("host %s [%d/%d]: %d increments\n", |
494 |
|
$hosts->{$host_key}->{'host'}, |
495 |
|
$host_nr, |
496 |
|
($#hosts + 1), |
497 |
|
$incs |
498 |
|
); |
499 |
|
print $host_header unless ($opt{q}); |
500 |
|
|
501 |
my $inc_nr = 0; |
my $inc_nr = 0; |
502 |
$beenThere = {}; |
$beenThere = {}; |
503 |
|
|
509 |
my $backupNum = $backup->{'num'}; |
my $backupNum = $backup->{'num'}; |
510 |
my @backupShares = (); |
my @backupShares = (); |
511 |
|
|
512 |
printf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n", |
my $share_header = sprintf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n", |
513 |
$hosts->{$host_key}->{'host'}, |
$hosts->{$host_key}->{'host'}, |
514 |
$inc_nr, $incs, $backupNum, |
$inc_nr, $incs, $backupNum, |
515 |
$backup->{type} || '?', |
$backup->{type} || '?', |
517 |
strftime($t_fmt,localtime($backup->{startTime})), |
strftime($t_fmt,localtime($backup->{startTime})), |
518 |
fmt_time($backup->{endTime} - $backup->{startTime}) |
fmt_time($backup->{endTime} - $backup->{startTime}) |
519 |
); |
); |
520 |
|
print $share_header unless ($opt{q}); |
521 |
|
|
522 |
my $files = BackupPC::View->new($bpc, $hostname, \@backups, 1); |
my $files = BackupPC::View->new($bpc, $hostname, \@backups, 1); |
523 |
foreach my $share ($files->shareList($backupNum)) { |
foreach my $share ($files->shareList($backupNum)) { |
531 |
# skip if already in database! |
# skip if already in database! |
532 |
next if ($count > 0); |
next if ($count > 0); |
533 |
|
|
534 |
|
# dump host and share header for -q |
535 |
|
if ($opt{q}) { |
536 |
|
if ($host_header) { |
537 |
|
print $host_header; |
538 |
|
$host_header = undef; |
539 |
|
} |
540 |
|
print $share_header; |
541 |
|
} |
542 |
|
|
543 |
# dump some log |
# dump some log |
544 |
print curr_time," ", $share; |
print curr_time," ", $share; |
545 |
|
|