/[BackupPC]/trunk/bin/BackupPC_updatedb
This is a repository of my old source code, which is no longer updated. Go to git.rot13.org for current projects!

Contents of /trunk/bin/BackupPC_updatedb



Revision 199
Thu Oct 13 21:19:06 2005 UTC by dpavlin
File size: 15544 byte(s)
 r8512@llin:  dpavlin | 2005-10-13 23:06:07 +0200
 Support for multi-part increments.

#!/usr/local/bin/perl -w

use strict;
use lib "__INSTALLDIR__/lib";

use DBI;
use BackupPC::Lib;
use BackupPC::View;
use Data::Dumper;
use Getopt::Std;
use Time::HiRes qw/time/;
use File::Pid;
use POSIX qw/strftime/;
use BackupPC::SearchLib;

use constant BPC_FTYPE_DIR => 5;
use constant EST_CHUNK => 100000;

my $debug = 0;
$|=1;

my $start_t = time();

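# Only one copy of this importer may run at a time: take a pidfile and
# abort if another instance already holds it.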
my $pidfile = new File::Pid;

if (my $pid = $pidfile->running ) {
    die "$0 already running: $pid\n";
} elsif ($pidfile->pid ne $$) {
    $pidfile->remove;
    $pidfile = new File::Pid;
}
$pidfile->write;
print STDERR "$0 using pid ",$pidfile->pid," file ",$pidfile->file,"\n";

my $t_fmt = '%Y-%m-%d %H:%M:%S';

my $hosts;
my $bpc = BackupPC::Lib->new || die;
my %Conf = $bpc->Conf();
my $TopDir = $bpc->TopDir();
my $beenThere = {};

my $dsn = $Conf{SearchDSN} || die "Need SearchDSN in config.pl\n";
my $user = $Conf{SearchUser} || '';

my $use_hest = $Conf{HyperEstraierIndex};
my ($index_path, $index_node_url) = BackupPC::SearchLib::getHyperEstraier_url($use_hest);

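# $Conf{HyperEstraierIndex} determines how the full-text index is reached:
# a path (relative to $TopDir) opens the index database directly, while a
# node URL sends documents to a running Hyper Estraier node.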
my $dbh = DBI->connect($dsn, $user, "", { RaiseError => 1, AutoCommit => 0 });

my %opt;

if ( !getopts("cdm:v:ij", \%opt ) ) {
    print STDERR <<EOF;
usage: $0 [-c|-d] [-m num] [-v level] [-i|-j]

Options:
    -c      create database on first use
    -d      delete database before import
    -m num  import just num increments for one host
    -v num  set verbosity (debug) level (default $debug)
    -i      update Hyper Estraier full text index
    -j      update full text index, but don't check for existing files

Option -j is a variation on -i. It allows faster initial creation
of the full-text index from an existing database.

EOF
    exit 1;
}

if ($opt{v}) {
    print "Debug level at $opt{v}\n";
    $debug = $opt{v};
}

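# Illustrative invocations (for reference only):
#   BackupPC_updatedb -c          # first run: create tables, indexes and sequences
#   BackupPC_updatedb -d          # delete existing rows, then re-import everything
#   BackupPC_updatedb -i -v 2     # refresh the Hyper Estraier index, verbose
#   BackupPC_updatedb -j          # initial full-text import, skip existing-file checks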
#---- subs ----

sub fmt_time {
    my $t = shift || return;
    my $out = "";
    my ($ss,$mm,$hh) = gmtime($t);
    $out .= "${hh}h" if ($hh);
    $out .= sprintf("%02d:%02d", $mm,$ss);
    return $out;
}

sub curr_time {
    return strftime($t_fmt,localtime());
}

my $hest_db;
my $hest_node;

sub signal {
    my($sig) = @_;
    if ($hest_db) {
        print "\nCaught a SIG$sig--syncing database and shutting down\n";
        $hest_db->sync();
        $hest_db->close();
    }
    exit(0);
}

$SIG{'INT'}  = \&signal;
$SIG{'QUIT'} = \&signal;

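# hest_update() pushes file records from the SQL database into the Hyper
# Estraier full-text index. Called with no arguments it (re)indexes every
# file; called with ($host_id, $share_id, $num) it indexes only the files
# of that one backup.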
sub hest_update {

    my ($host_id, $share_id, $num) = @_;

    my $skip_check = $opt{j};
    print STDERR "Skipping check for existing files -- this should be used only with initial import\n" if ($skip_check);

    unless ($use_hest) {
        print STDERR "HyperEstraier support not enabled in configuration\n";
        return;
    }

    print curr_time," updating HyperEstraier:";

    my $t = time();

    my $offset = 0;
    my $added = 0;

    print " opening index $use_hest";
    if ($index_path) {
        $hest_db = HyperEstraier::Database->new();
        $hest_db->open($TopDir . $index_path, $HyperEstraier::Database::DBWRITER | $HyperEstraier::Database::DBCREAT);
        print " directly";
    } elsif ($index_node_url) {
        $hest_node ||= HyperEstraier::Node->new($index_node_url);
        $hest_node->set_auth('admin', 'admin');
        print " via node URL";
    } else {
        die "don't know how to use HyperEstraier Index $use_hest";
    }
    print " increment is " . EST_CHUNK . " files:";

    my $results = 0;

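    # Fetch candidate files in EST_CHUNK-sized pages; the loop ends when a
    # page comes back smaller than a full chunk.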
    while (1) {

        my $where = '';
        my @data;
        if (defined($host_id) && defined($share_id) && defined($num)) {
            $where = qq{
                WHERE
                    hosts.id = ? AND
                    shares.id = ? AND
                    files.backupnum = ?
            };
            @data = ( $host_id, $share_id, $num );
        }

        my $limit = sprintf('LIMIT '.EST_CHUNK.' OFFSET %d', $offset);

        my $sth = $dbh->prepare(qq{
            SELECT
                files.id AS fid,
                hosts.name AS hname,
                shares.name AS sname,
                -- shares.share AS sharename,
                files.backupnum AS backupnum,
                -- files.name AS filename,
                files.path AS filepath,
                files.date AS date,
                files.type AS type,
                files.size AS size,
                files.shareid AS shareid,
                backups.date AS backup_date
            FROM files
                INNER JOIN shares ON files.shareID=shares.ID
                INNER JOIN hosts ON hosts.ID = shares.hostID
                INNER JOIN backups ON backups.num = files.backupNum AND backups.hostID = hosts.ID AND backups.shareID = shares.ID
            $where
            $limit
        });

        $sth->execute(@data);
        $results = $sth->rows;

        if ($results == 0) {
            print " - no new files\n";
            last;
        }

        sub fmt_date {
            my $t = shift || return;
            my $iso = BackupPC::Lib::timeStamp($t);
            $iso =~ s/\s/T/;
            return $iso;
        }

        while (my $row = $sth->fetchrow_hashref()) {

            my $fid = $row->{'fid'} || die "no fid?";
            my $uri = 'file:///' . $fid;

            unless ($skip_check) {
                my $id = ($hest_db || $hest_node)->uri_to_id($uri);
                next unless ($id == -1);
            }

            # create a document object
            my $doc = HyperEstraier::Document->new;

            # add attributes to the document object
            $doc->add_attr('@uri', $uri);

            foreach my $c (@{ $sth->{NAME} }) {
                $doc->add_attr($c, $row->{$c}) if ($row->{$c});
            }

            #$doc->add_attr('@cdate', fmt_date($row->{'date'}));

            # add the body text to the document object
            my $path = $row->{'filepath'};
            $doc->add_text($path);
            $path =~ s/(.)/$1 /g;
            $doc->add_hidden_text($path);

            print STDERR $doc->dump_draft,"\n" if ($debug > 1);

            # register the document object to the database
            if ($hest_db) {
                $hest_db->put_doc($doc, $HyperEstraier::Database::PDCLEAN);
            } elsif ($hest_node) {
                $hest_node->put_doc($doc);
            } else {
                die "not supported";
            }
            $added++;
        }

        print " $added";
        $hest_db->sync() if ($index_path);

        $offset += EST_CHUNK;

        last unless ($results == EST_CHUNK);
    }

    if ($index_path) {
        print ", close";
        $hest_db->close();
    }

    my $dur = (time() - $t) || 1;
    printf(" [%.2f/s dur: %s]\n",
        ( $added / $dur ),
        fmt_time($dur)
    );
}

#---- /subs ----


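# A full index update is forced when -i or -j is given, or when a directly
# accessed index is configured but does not exist yet; it is skipped with
# -c, since in that case the SQL tables are only being created.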
## update index ##
if (($opt{i} || $opt{j} || ($index_path && ! -e $index_path)) && !$opt{c}) {
    # update all
    print "force update of HyperEstraier index ";
    print "importing existing data" unless (-e $index_path);
    print "by -i flag" if ($opt{i});
    print "by -j flag" if ($opt{j});
    print "\n";
    hest_update();
}

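# Schema overview: hosts have shares, each backup of a share is a row in
# backups, and every file seen in a backup becomes a row in files; the
# dvds/archive tables track backups burned to DVD ("mjesto" is Croatian
# for "place").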
## create tables ##
if ($opt{c}) {
    sub do_index {
        my $index = shift || return;
        my ($table,$col,$unique) = split(/:/, $index);
        $unique ||= '';
        $index =~ s/\W+/_/g;
        print "$index on $table($col)" . ( $unique ? "u" : "" ) . " ";
        $dbh->do(qq{ create $unique index $index on $table($col) });
    }

    print "creating tables...\n";

    $dbh->do( qq{
        create table hosts (
            ID      SERIAL          PRIMARY KEY,
            name    VARCHAR(30)     NOT NULL,
            IP      VARCHAR(15)
        );

        create table shares (
            ID      SERIAL          PRIMARY KEY,
            hostID  INTEGER         NOT NULL references hosts(id),
            name    VARCHAR(30)     NOT NULL,
            share   VARCHAR(200)    NOT NULL
        );

        create table dvds (
            ID      SERIAL          PRIMARY KEY,
            num     INTEGER         NOT NULL,
            name    VARCHAR(255)    NOT NULL,
            mjesto  VARCHAR(255)
        );

        create table backups (
            id          serial,
            hostID      INTEGER NOT NULL references hosts(id),
            num         INTEGER NOT NULL,
            date        integer NOT NULL,
            type        CHAR(4) not null,
            shareID     integer not null references shares(id),
            size        bigint not null,
            inc_size    bigint not null default -1,
            inc_deleted boolean default false,
            parts       integer not null default 1,
            PRIMARY KEY(id)
        );

        create table files (
            ID          SERIAL,
            shareID     INTEGER NOT NULL references shares(id),
            backupNum   INTEGER NOT NULL,
            name        VARCHAR(255) NOT NULL,
            path        VARCHAR(255) NOT NULL,
            date        integer NOT NULL,
            type        INTEGER NOT NULL,
            size        bigint NOT NULL,
            primary key(id)
        );

        create table archive (
            id          serial,
            dvd_nr      int not null,
            total_size  bigint default -1,
            note        text,
            username    varchar(20) not null,
            date        timestamp default now(),
            primary key(id)
        );

        create table archive_backup (
            archive_id  int not null references archive(id) on delete cascade,
            backup_id   int not null references backups(id),
            primary key(archive_id, backup_id)
        );

        create table archive_burned (
            archive_id  int references archive(id),
            date        date default now(),
            part        int not null default 1,
            iso_size    bigint default -1
        );

    });

    print "creating indexes: ";

    foreach my $index (qw(
        hosts:name
        backups:hostID
        backups:num
        backups:shareID
        shares:hostID
        shares:name
        files:shareID
        files:path
        files:name
        files:date
        files:size
        archive:dvd_nr
        archive_burned:archive_id
    )) {
        do_index($index);
    }

    print " creating sequence: ";
    foreach my $seq (qw/dvd_nr/) {
        print "$seq ";
        $dbh->do( qq{ CREATE SEQUENCE $seq } );
    }


    print "...\n";

    $dbh->commit;

}

## delete data before inserting ##
if ($opt{d}) {
    print "deleting ";
    foreach my $table (qw(files dvds backups shares hosts)) {
        print "$table ";
        $dbh->do(qq{ DELETE FROM $table });
    }
    print " done...\n";

    $dbh->commit;
}

## insert new values ##

# get hosts
$hosts = $bpc->HostInfoRead();
my $hostID;
my $shareID;

my $sth;

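# Named prepared-statement handles live in the $sth hashref; getShareID()
# and found_in_db() below add their own handles to it lazily.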
$sth->{insert_hosts} = $dbh->prepare(qq{
    INSERT INTO hosts (name, IP) VALUES (?,?)
});

$sth->{hosts_by_name} = $dbh->prepare(qq{
    SELECT ID FROM hosts WHERE name=?
});

$sth->{backups_count} = $dbh->prepare(qq{
    SELECT COUNT(*)
    FROM backups
    WHERE hostID=? AND num=? AND shareid=?
});

$sth->{insert_backups} = $dbh->prepare(qq{
    INSERT INTO backups (hostID, num, date, type, shareid, size)
    VALUES (?,?,?,?,?,?)
});

$sth->{insert_files} = $dbh->prepare(qq{
    INSERT INTO files
        (shareID, backupNum, name, path, date, type, size)
    VALUES (?,?,?,?,?,?,?)
});

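# Main import loop: for each configured host, walk every backup and every
# share in it through BackupPC::View, skip share backups that are already
# in the database, and insert the rest, committing once per share.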
foreach my $host_key (keys %{$hosts}) {

    my $hostname = $hosts->{$host_key}->{'host'} || die "can't find host for $host_key";

    $sth->{hosts_by_name}->execute($hosts->{$host_key}->{'host'});

    unless (($hostID) = $sth->{hosts_by_name}->fetchrow_array()) {
        $sth->{insert_hosts}->execute(
            $hosts->{$host_key}->{'host'},
            $hosts->{$host_key}->{'ip'}
        );

        $hostID = $dbh->last_insert_id(undef,undef,'hosts',undef);
    }

    print "host ".$hosts->{$host_key}->{'host'}.": ";

    # get backups for a host
    my @backups = $bpc->BackupInfoRead($hostname);
    my $incs = scalar @backups;
    print "$incs increments\n";

    my $inc_nr = 0;
    $beenThere = {};

    foreach my $backup (@backups) {

        $inc_nr++;
        last if ($opt{m} && $inc_nr > $opt{m});

        my $backupNum = $backup->{'num'};
        my @backupShares = ();

        printf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n",
            $hosts->{$host_key}->{'host'},
            $inc_nr, $incs, $backupNum,
            $backup->{type} || '?',
            $backup->{nFilesNew} || '?', $backup->{nFiles} || '?',
            strftime($t_fmt,localtime($backup->{startTime})),
            fmt_time($backup->{endTime} - $backup->{startTime})
        );

        my $files = BackupPC::View->new($bpc, $hostname, \@backups, 1);
        foreach my $share ($files->shareList($backupNum)) {

            my $t = time();

            $shareID = getShareID($share, $hostID, $hostname);

            $sth->{backups_count}->execute($hostID, $backupNum, $shareID);
            my ($count) = $sth->{backups_count}->fetchrow_array();
            # skip if already in database!
            next if ($count > 0);

            # dump some log
            print curr_time," ", $share;

            my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);

            $sth->{insert_backups}->execute(
                $hostID,
                $backupNum,
                $backup->{'endTime'},
                substr($backup->{'type'},0,4),
                $shareID,
                $size,
            );

            print " commit";
            $dbh->commit();

            my $dur = (time() - $t) || 1;
            printf(" %d/%d files %d/%d dirs %0.2f MB [%.2f/s dur: %s]\n",
                $nf, $f, $nd, $d,
                ($size / 1024 / 1024),
                ( ($f+$d) / $dur ),
                fmt_time($dur)
            );

            hest_update($hostID, $shareID, $backupNum) if ($nf + $nd > 0);
        }

    }
}
undef $sth;
$dbh->commit();
$dbh->disconnect();

print "total duration: ",fmt_time(time() - $start_t),"\n";

$pidfile->remove;

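# getShareID() returns the ID of a share on a given host, inserting a new
# row into shares (with a "hostname/share" label) the first time it is seen.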
sub getShareID {

    my ($share, $hostID, $hostname) = @_;

    $sth->{share_id} ||= $dbh->prepare(qq{
        SELECT ID FROM shares WHERE hostID=? AND name=?
    });

    $sth->{share_id}->execute($hostID,$share);

    my ($id) = $sth->{share_id}->fetchrow_array();

    return $id if (defined($id));

    $sth->{insert_share} ||= $dbh->prepare(qq{
        INSERT INTO shares
            (hostID,name,share)
        VALUES (?,?,?)
    });

    my $drop_down = $hostname . '/' . $share;
    $drop_down =~ s#//+#/#g;

    $sth->{insert_share}->execute($hostID,$share, $drop_down);
    return $dbh->last_insert_id(undef,undef,'shares',undef);
}

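# found_in_db($key, @row) checks the in-memory $beenThere cache and then the
# files table for an existing row; if the file is new it is inserted and a
# false value is returned.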
sub found_in_db {

    my @data = @_;
    shift @data;

    my ($key, $shareID,undef,$name,$path,$date,undef,$size) = @_;

    return $beenThere->{$key} if (defined($beenThere->{$key}));

    $sth->{file_in_db} ||= $dbh->prepare(qq{
        SELECT 1 FROM files
        WHERE shareID = ? and
            path = ? and
            date = ? and
            size = ?
        LIMIT 1
    });

    my @param = ($shareID,$path,$date,$size);
    $sth->{file_in_db}->execute(@param);
    my $rows = $sth->{file_in_db}->rows;
    print STDERR "## found_in_db($shareID,$path,$date,$size) ",( $rows ? '+' : '-' ), join(" ",@param), "\n" if ($debug >= 3);

    $beenThere->{$key}++;

    $sth->{'insert_files'}->execute(@data) unless ($rows);
    return $rows;
}

####################################################
# recursing through filesystem structure and       #
# returning flattened files list                   #
####################################################
sub recurseDir {

    my ($bpc, $hostname, $files, $backupNum, $share, $dir, $shareID) = @_;

    print STDERR "\nrecurse($hostname,$backupNum,$share,$dir,$shareID)\n" if ($debug >= 1);

    my ($nr_files, $new_files, $nr_dirs, $new_dirs, $size) = (0,0,0,0,0);

    { # scope
        my @stack;

        print STDERR "# dirAttrib($backupNum, $share, $dir)\n" if ($debug >= 2);
        my $filesInBackup = $files->dirAttrib($backupNum, $share, $dir);

        # first, add all the entries in current directory
        foreach my $path_key (keys %{$filesInBackup}) {
            print STDERR "# file ",Dumper($filesInBackup->{$path_key}),"\n" if ($debug >= 3);
            my @data = (
                $shareID,
                $backupNum,
                $path_key,
                $filesInBackup->{$path_key}->{'relPath'},
                $filesInBackup->{$path_key}->{'mtime'},
                $filesInBackup->{$path_key}->{'type'},
                $filesInBackup->{$path_key}->{'size'}
            );

            my $key = join(" ", (
                $shareID,
                $dir,
                $path_key,
                $filesInBackup->{$path_key}->{'mtime'},
                $filesInBackup->{$path_key}->{'size'}
            ));

            my $found;
            if (! defined($beenThere->{$key}) && ! ($found = found_in_db($key, @data)) ) {
                print STDERR "# key: $key [", $beenThere->{$key},"]" if ($debug >= 2);

                if ($filesInBackup->{$path_key}->{'type'} == BPC_FTYPE_DIR) {
                    $new_dirs++ unless ($found);
                    print STDERR " dir\n" if ($debug >= 2);
                } else {
                    $new_files++ unless ($found);
                    print STDERR " file\n" if ($debug >= 2);
                }
                $size += $filesInBackup->{$path_key}->{'size'} || 0;
            }

            if ($filesInBackup->{$path_key}->{'type'} == BPC_FTYPE_DIR) {
                $nr_dirs++;

                my $full_path = $dir . '/' . $path_key;
                push @stack, $full_path;
                print STDERR "### store to stack: $full_path\n" if ($debug >= 3);

#               my ($f,$nf,$d,$nd) = recurseDir($bpc, $hostname, $backups, $backupNum, $share, $path_key, $shareID) unless ($beenThere->{$key});
#
#               $nr_files += $f;
#               $new_files += $nf;
#               $nr_dirs += $d;
#               $new_dirs += $nd;

            } else {
                $nr_files++;
            }
        }

        print STDERR "## STACK ",join(", ", @stack),"\n" if ($debug >= 2);

        while ( my $dir = shift @stack ) {
            my ($f,$nf,$d,$nd, $s) = recurseDir($bpc, $hostname, $files, $backupNum, $share, $dir, $shareID);
            print STDERR "# $dir f: $f nf: $nf d: $d nd: $nd\n" if ($debug >= 1);
            $nr_files += $f;
            $new_files += $nf;
            $nr_dirs += $d;
            $new_dirs += $nd;
            $size += $s;
        }
    }

    return ($nr_files, $new_files, $nr_dirs, $new_dirs, $size);
}


Properties

Name            Value
svn:executable  *
