--- trunk/bin/BackupPC_updatedb	2005/08/21 15:59:55	62
+++ trunk/bin/BackupPC_updatedb	2005/08/30 14:19:54	98
@@ -13,6 +13,7 @@
 use POSIX qw/strftime/;
 
 use constant BPC_FTYPE_DIR => 5;
+use constant EST_CHUNK => 10000;
 
 my $debug = 0;
 $|=1;
@@ -40,26 +41,199 @@
 my $dsn = $Conf{SearchDSN} || die "Need SearchDSN in config.pl\n";
 my $user = $Conf{SearchUser} || '';
+my $index_path = $Conf{HyperEstraierIndex};
+$index_path = $TopDir . '/' . $index_path;
+$index_path =~ s#//#/#g;
+if ($index_path) {
+	use HyperEstraier;
+}
+
 my $dbh = DBI->connect($dsn, $user, "", { RaiseError => 1, AutoCommit => 0 });
 
 my %opt;
 
-if ( !getopts("cdm:v:", \%opt ) ) {
+if ( !getopts("cdm:v:i", \%opt ) ) {
 	print STDERR <<EOF;
-usage: $0 [-c|-d] [-m num] [-v level]
+usage: $0 [-c|-d] [-m num] [-v level] [-i]
 EOF
 	exit 1;
 }
 
+if ($opt{v}) {
+	print "Debug level at $opt{v}\n";
+	$debug = $opt{v};
+}
+
+#---- subs ----
+
+sub fmt_time {
+	my $t = shift || return;
+	my $out = "";
+	my ($ss,$mm,$hh) = gmtime($t);
+	$out .= "${hh}h" if ($hh);
+	$out .= sprintf("%02d:%02d", $mm,$ss);
+	return $out;
+}
+
+sub curr_time {
+	return strftime($t_fmt,localtime());
+}
+
+my $hest_db;
+
+sub signal {
+	my($sig) = @_;
+	if ($hest_db) {
+		$hest_db->sync();
+		$hest_db->close();
+	}
+	exit(0);
+}
+
+$SIG{'INT'} = \&signal;
+$SIG{'QUIT'} = \&signal;
+
+sub hest_update {
+
+	my ($host_id, $share_id, $num) = @_;
+
+	print curr_time," updating HyperEstraier:";
+
+	my $t = time();
+
+	my $offset = 0;
+	my $added = 0;
+
+	print " opening index $index_path";
+	$hest_db = HyperEstraier::Database->new();
+	$hest_db->open($index_path, $HyperEstraier::Database::DBWRITER | $HyperEstraier::Database::DBCREAT);
+
+	my $results = 0;
+
+	do {
+
+		my $where = '';
+		if ($host_id && $share_id && $num) {
+			$where = qq{
+			WHERE
+				hosts.id = ? AND
+				shares.id = ? AND
+				files.backupnum = ?
+			};
+		}
+
+		my $limit = sprintf('LIMIT '.EST_CHUNK.' OFFSET %d', $offset);
+
+		my $sth = $dbh->prepare(qq{
+			SELECT
+				files.id		AS fid,
+				hosts.name		AS hname,
+				shares.name		AS sname,
+				-- shares.share		AS sharename,
+				files.backupnum		AS backupnum,
+				-- files.name		AS filename,
+				files.path		AS filepath,
+				files.date		AS date,
+				files.type		AS type,
+				files.size		AS size,
+				files.shareid		AS shareid,
+				backups.date		AS backup_date
+			FROM files
+				INNER JOIN shares	ON files.shareID=shares.ID
+				INNER JOIN hosts	ON hosts.ID = shares.hostID
+				INNER JOIN backups	ON backups.num = files.backupNum and backups.hostID = hosts.ID AND backups.shareID = shares.ID
+			$where
+			$limit
+		});
+
+		$sth->execute(@_);
+		$results = $sth->rows;
+
+		if ($results == 0) {
+			print " - no more files\n";
+			last;
+		}
+
+		sub fmt_date {
+			my $t = shift || return;
+			my $iso = BackupPC::Lib::timeStamp($t);
+			$iso =~ s/\s/T/;
+			return $iso;
+		}
+
+		while (my $row = $sth->fetchrow_hashref()) {
+
+			my $fid = $row->{'fid'} || die "no fid?";
+			my $uri = 'file:///' . $fid;
+
+			my $id = $hest_db->uri_to_id($uri);
+			next unless ($id == -1);
+
+			# create a document object
+			my $doc = HyperEstraier::Document->new;
+
+			# add attributes to the document object
+			$doc->add_attr('@uri', $uri);
+			foreach my $c (@{ $sth->{NAME} }) {
+				$doc->add_attr($c, $row->{$c}) if ($row->{$c});
+			}
+
+			#$doc->add_attr('@cdate', fmt_date($row->{'date'}));
+
+			# add the body text to the document object
+			my $path = $row->{'filepath'};
+			$doc->add_text($path);
+			$path =~ s/(.)/$1 /g;
+			$doc->add_hidden_text($path);
+
+			print STDERR $doc->dump_draft,"\n" if ($debug > 1);
+
+			# register the document object to the database
+			$hest_db->put_doc($doc, $HyperEstraier::Database::PDCLEAN);
+			$added++;
+		}
+
+		print " $added";
+		$hest_db->sync();
+
+		$offset += EST_CHUNK;
+
+	} while ($results == EST_CHUNK);
+
+	print ", close";
+	$hest_db->close();
+
+	my $dur = (time() - $t) || 1;
+	printf(" [%.2f/s dur: %s]\n",
+		( $added / $dur ),
+		fmt_time($dur)
+	);
+}
+
+#---- /subs ----
+
+
+## update index ##
+if (($opt{i} || ($index_path && ! -e $index_path)) && !$opt{c}) {
+	# update all
+	print "force update of HyperEstraier index ";
+	print "importing existing data" unless (-e $index_path);
+	print "by -i flag" if ($opt{i});
+	print "\n";
+	hest_update();
+}
+
+## create tables ##
 if ($opt{c}) {
 	sub do_index {
 		my $index = shift || return;
@@ -95,11 +269,13 @@
 			num	INTEGER NOT NULL,
 			date	integer NOT NULL,
 			type	CHAR(4) not null,
-			PRIMARY KEY(hostID, num)
+			shareID	integer not null references shares(id),
+			size	integer not null,
+			PRIMARY KEY(hostID, num, shareID)
 		);
 	});
 
-	do_index('backups_hostid,num_unique');
+	#do_index('backups_hostid,num_unique');
 
 	$dbh->do(qq{
 		create table dvds (
@@ -147,6 +323,7 @@
 	}
 }
 
+## delete data before inserting ##
 if ($opt{d}) {
 	print "deleting ";
 	foreach my $table (qw(files dvds backups shares hosts)) {
@@ -158,12 +335,7 @@
 	$dbh->commit;
 }
 
-if ($opt{v}) {
-	print "Debug level at $opt{v}\n";
-	$debug = $opt{v};
-}
-
-#################################INSERT VALUES#############################
+## insert new values ##
 
 # get hosts
 $hosts = $bpc->HostInfoRead();
@@ -180,15 +352,15 @@
 SELECT ID FROM hosts WHERE name=?
 });
 
-$sth->{backups_broj} = $dbh->prepare(qq{
+$sth->{backups_count} = $dbh->prepare(qq{
 SELECT COUNT(*)
 FROM backups
-WHERE hostID=? AND num=?
+WHERE hostID=? AND num=? AND shareid=?
 });
 
 $sth->{insert_backups} = $dbh->prepare(qq{
-INSERT INTO backups (hostID, num, date, type)
-VALUES (?,?,?,?)
+INSERT INTO backups (hostID, num, date, type, shareid, size)
+VALUES (?,?,?,?,?,?)
 });
 
 $sth->{insert_files} = $dbh->prepare(qq{
@@ -197,15 +369,6 @@
 VALUES (?,?,?,?,?,?,?)
 });
 
-sub fmt_time {
-	my $t = shift || return;
-	my $out = "";
-	my ($ss,$mm,$hh) = gmtime($t);
-	$out .= "${hh}h" if ($hh);
-	$out .= sprintf("%02d:%02d", $mm,$ss);
-	return $out;
-}
-
 foreach my $host_key (keys %{$hosts}) {
 
 	my $hostname = $hosts->{$host_key}->{'host'} || die "can't find host for $host_key";
@@ -221,13 +384,15 @@
 		$hostID = $dbh->last_insert_id(undef,undef,'hosts',undef);
 	}
 
-	print("host ".$hosts->{$host_key}->{'host'}.": ");
+	print "host ".$hosts->{$host_key}->{'host'}.": ";
 
 	# get backups for a host
 	my @backups = $bpc->BackupInfoRead($hostname);
-	print scalar @backups, " increments\n";
+	my $incs = scalar @backups;
+	print "$incs increments\n";
 
 	my $inc_nr = 0;
+	$beenThere = {};
 
 	foreach my $backup (@backups) {
@@ -237,43 +402,53 @@
 		my $backupNum = $backup->{'num'};
 		my @backupShares = ();
 
-		print $hosts->{$host_key}->{'host'},
-			"\t#$backupNum\t", $backup->{type} || '?', " ",
-			$backup->{nFilesNew} || '?', "/", $backup->{nFiles} || '?',
-			" files (date: ",
+		printf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n",
+			$hosts->{$host_key}->{'host'},
+			$inc_nr, $incs, $backupNum,
+			$backup->{type} || '?',
+			$backup->{nFilesNew} || '?', $backup->{nFiles} || '?',
 			strftime($t_fmt,localtime($backup->{startTime})),
-			" dur: ",
-			fmt_time($backup->{endTime} - $backup->{startTime}),
-			")\n";
-
-		$sth->{backups_broj}->execute($hostID, $backupNum);
-		my ($broj) = $sth->{backups_broj}->fetchrow_array();
-		next if ($broj > 0);
-
-		$sth->{insert_backups}->execute(
-			$hostID,
-			$backupNum,
-			$backup->{'endTime'},
-			$backup->{'type'}
+			fmt_time($backup->{endTime} - $backup->{startTime})
 		);
-		$dbh->commit();
 
 		my $files = BackupPC::View->new($bpc, $hostname, \@backups, 1);
 		foreach my $share ($files->shareList($backupNum)) {
 
			my $t = time();
 
-			print strftime($t_fmt,localtime())," ", $share;
 			$shareID = getShareID($share, $hostID, $hostname);
 
-			my ($f, $nf, $d, $nd) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);
+			$sth->{backups_count}->execute($hostID, $backupNum, $shareID);
+			my ($count) = $sth->{backups_count}->fetchrow_array();
+			# skip if already in database!
+			next if ($count > 0);
+
+			# dump some log
+			print curr_time," ", $share;
+
+			my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);
+
+			$sth->{insert_backups}->execute(
+				$hostID,
+				$backupNum,
+				$backup->{'endTime'},
+				$backup->{'type'},
+				$shareID,
+				$size,
+			);
+
+			print " commit";
+			$dbh->commit();
+
 			my $dur = (time() - $t) || 1;
-			printf(" %d/%d files %d/%d dirs [%.2f/s dur: %s]\n",
+			printf(" %d/%d files %d/%d dirs %0.2f MB [%.2f/s dur: %s]\n",
 				$nf, $f, $nd, $d,
+				($size / 1024 / 1024),
 				( ($f+$d) / $dur ),
 				fmt_time($dur)
 			);
-			$dbh->commit();
+
+			hest_update($hostID, $shareID, $backupNum);
 		}
 
 	}
@@ -318,7 +493,7 @@
 	my @data = @_;
 	shift @data;
 
-	my ($key, $shareID,undef,$name,$path,undef,$date,undef,$size) = @_;
+	my ($key, $shareID,undef,$name,$path,$date,undef,$size) = @_;
 
 	return $beenThere->{$key} if (defined($beenThere->{$key}));
@@ -326,15 +501,15 @@
 		SELECT 1 FROM files
 		WHERE shareID = ? and
 			path = ? and
-			name = ? and
 			date = ? and
 			size = ?
+		LIMIT 1
 	});
 
-	my @param = ($shareID,$path,$name,$date,$size);
+	my @param = ($shareID,$path,$date,$size);
 	$sth->{file_in_db}->execute(@param);
 	my $rows = $sth->{file_in_db}->rows;
-	print STDERR "## found_in_db ",( $rows ? '+' : '-' ), join(" ",@param), "\n" if ($debug >= 3);
+	print STDERR "## found_in_db($shareID,$path,$date,$size) ",( $rows ? '+' : '-' ), join(" ",@param), "\n" if ($debug >= 3);
 
 	$beenThere->{$key}++;
@@ -352,7 +527,7 @@
 
 	print STDERR "\nrecurse($hostname,$backupNum,$share,$dir,$shareID)\n" if ($debug >= 1);
 
-	my ($nr_files, $new_files, $nr_dirs, $new_dirs) = (0,0,0,0);
+	my ($nr_files, $new_files, $nr_dirs, $new_dirs, $size) = (0,0,0,0,0);
 
 	{ # scope
 		my @stack;
@@ -362,6 +537,7 @@
 
 		# first, add all the entries in current directory
 		foreach my $path_key (keys %{$filesInBackup}) {
+			print STDERR "# file ",Dumper($filesInBackup->{$path_key}),"\n" if ($debug >= 3);
 			my @data = (
 				$shareID,
 				$backupNum,
@@ -380,17 +556,18 @@
 				$filesInBackup->{$path_key}->{'size'}
 			));
 
-
-			if (! defined($beenThere->{$key}) && ! found_in_db($key, @data)) {
+			my $found;
+			if (! defined($beenThere->{$key}) && ! ($found = found_in_db($key, @data)) ) {
 				print STDERR "# key: $key [", $beenThere->{$key},"]" if ($debug >= 2);
 
 				if ($filesInBackup->{$path_key}->{'type'} == BPC_FTYPE_DIR) {
-					$new_dirs++;
+					$new_dirs++ unless ($found);
 					print STDERR " dir\n" if ($debug >= 2);
 				} else {
-					$new_files++;
+					$new_files++ unless ($found);
 					print STDERR " file\n" if ($debug >= 2);
 				}
+				$size += $filesInBackup->{$path_key}->{'size'} || 0;
 			}
 
 			if ($filesInBackup->{$path_key}->{'type'} == BPC_FTYPE_DIR) {
@@ -415,15 +592,16 @@
 		print STDERR "## STACK ",join(", ", @stack),"\n" if ($debug >= 2);
 
 		while ( my $dir = shift @stack ) {
-			my ($f,$nf,$d,$nd) = recurseDir($bpc, $hostname, $files, $backupNum, $share, $dir, $shareID);
+			my ($f,$nf,$d,$nd, $s) = recurseDir($bpc, $hostname, $files, $backupNum, $share, $dir, $shareID);
 			print STDERR "# $dir f: $f nf: $nf d: $d nd: $nd\n" if ($debug >= 1);
 			$nr_files += $f;
 			$new_files += $nf;
 			$nr_dirs += $d;
 			$new_dirs += $nd;
+			$size += $s;
 		}
 	}
 
-	return ($nr_files, $new_files, $nr_dirs, $new_dirs);
+	return ($nr_files, $new_files, $nr_dirs, $new_dirs, $size);
 }
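
A minimal sketch (not part of the patch) of how the index written by hest_update() could be spot-checked from a standalone script. It reuses only calls that already appear above (HyperEstraier::Database new/open/uri_to_id/close and the 'file:///' . files.id URI scheme); the read-only DBREADER open flag, the index path and the file id are assumptions or placeholders.

#!/usr/bin/perl -w
use strict;
use HyperEstraier;

# placeholder: whatever $TopDir . '/' . $Conf{HyperEstraierIndex} resolves to
my $index_path = '/var/lib/backuppc/index';

my $db = HyperEstraier::Database->new();
# DBREADER is assumed to exist alongside the DBWRITER/DBCREAT flags used in the patch
$db->open($index_path, $HyperEstraier::Database::DBREADER);

# hest_update() registers every files row under the URI 'file:///' . files.id
my $fid = 42;	# hypothetical files.id
my $id  = $db->uri_to_id('file:///' . $fid);

# uri_to_id returns -1 when the URI is not in the index (same check hest_update uses)
print $id == -1 ? "file $fid is not indexed\n" : "file $fid is indexed as document $id\n";

$db->close();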