/[BackupPC]/trunk/bin/BackupPC_updatedb
This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!

Contents of /trunk/bin/BackupPC_updatedb



Revision 329
Tue Jan 31 22:04:47 2006 UTC (18 years, 3 months ago) by dpavlin
File size: 19068 byte(s)
 r9171@llin:  dpavlin | 2006-01-31 23:04:30 +0100
 removed all direct updates of parts, which are now handled by custom referential triggers on backups and backup_parts (which also check part_nr)
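 In other words, backups.parts is no longer written directly by this script; the plpgsql triggers created further down keep it in sync with the rows in backup_parts. A minimal sketch of the intended behaviour (PostgreSQL, assuming a backups row with id = 1 already exists and using dummy part data):

     INSERT INTO backup_parts (backup_id, part_nr, tar_size, size, md5, items)
         VALUES (1, 1, 1024, 1024, 'dummy-md5', 1);
     -- the AFTER INSERT trigger on backup_parts increments backups.parts for backup 1,
     -- and the trigger on backups then verifies that parts really matches
     -- count(*) FROM backup_parts (and that part_nr was assigned sequentially);
     -- a mismatch raises an exception instead of silently drifting out of sync.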

#!/usr/local/bin/perl -w

use strict;
use lib "__INSTALLDIR__/lib";

use DBI;
use BackupPC::Lib;
use BackupPC::View;
use Data::Dumper;
use Getopt::Std;
use Time::HiRes qw/time/;
use File::Pid;
use POSIX qw/strftime/;
use BackupPC::SearchLib;
use Cwd qw/abs_path/;

use constant BPC_FTYPE_DIR => 5;
use constant EST_CHUNK => 4096;

# one hour offset to tolerate daylight saving time changes
my $dst_offset = 60 * 60;

my $debug = 0;
$|=1;

my $start_t = time();

my $pid_path = abs_path($0);
$pid_path =~ s/\W+/_/g;

my $pidfile = new File::Pid({
    file => "/tmp/$pid_path",
});

if (my $pid = $pidfile->running ) {
    die "$0 already running: $pid\n";
} elsif ($pidfile->pid ne $$) {
    $pidfile->remove;
    $pidfile = new File::Pid;
}
print STDERR "$0 using pid ",$pidfile->pid," file ",$pidfile->file,"\n";
$pidfile->write;

my $t_fmt = '%Y-%m-%d %H:%M:%S';

my $hosts;
my $bpc = BackupPC::Lib->new || die;
my %Conf = $bpc->Conf();
my $TopDir = $bpc->TopDir();
my $beenThere = {};

my $dsn = $Conf{SearchDSN} || die "Need SearchDSN in config.pl\n";
my $user = $Conf{SearchUser} || '';

my $index_node_url = $Conf{HyperEstraierIndex};

my $dbh = DBI->connect($dsn, $user, "", { RaiseError => 1, AutoCommit => 0 });

my %opt;

if ( !getopts("cdm:v:ijfq", \%opt ) ) {
    print STDERR <<EOF;
usage: $0 [-c|-d] [-m num] [-v level] [-i|-j|-f]

Options:
    -c      create database on first use
    -d      delete database before import
    -m num  import just num increments for one host
    -v num  set verbosity (debug) level (default $debug)
    -i      update Hyper Estraier full text index
    -j      update full text index, don't check existing files
    -f      don't do anything with full text index
    -q      be quiet for hosts without changes

Option -j is a variation of -i. It allows faster initial creation
of the full-text index from an existing database.

Option -f will create a database which is out of sync with the full text
index. You will have to re-run $0 with -i to fix it.

EOF
    exit 1;
}

if ($opt{v}) {
    print "Debug level at $opt{v}\n";
    $debug = $opt{v};
} elsif ($opt{f}) {
    print "WARNING: disabling full-text index update. You need to re-run $0 -j !\n";
    $index_node_url = undef;
}

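# In short: -c bootstraps the schema (tables, indexes, sequence, triggers),
# -d wipes existing rows before a re-import, -m limits how many increments
# are imported per host, and -i/-j/-f control whether and how the Hyper
# Estraier full-text index is updated.
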
#---- subs ----

sub fmt_time {
    my $t = shift || return;
    my $out = "";
    my ($ss,$mm,$hh) = gmtime($t);
    $out .= "${hh}h" if ($hh);
    $out .= sprintf("%02d:%02d", $mm, $ss);
    return $out;
}

sub curr_time {
    return strftime($t_fmt, localtime());
}

my $hest_node;

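# hest_update($host_id, $share_id, $num)
# Feeds file metadata to the Hyper Estraier index node: called with arguments
# it indexes the files of a single backup, called without arguments (as the
# -i/-j code path below does) it walks the whole files table. Rows are fetched
# in EST_CHUNK-sized pages, and files already present in the index are skipped
# unless -j disabled that check.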
sub hest_update {

    my ($host_id, $share_id, $num) = @_;

    my $skip_check = $opt{j} && print STDERR "Skipping check for existing files -- this should be used only with initial import\n";

    unless ($index_node_url && $index_node_url =~ m#^http://#) {
        print STDERR "HyperEstraier support not enabled or index node invalid\n" if ($debug);
        $index_node_url = 0;
        return;
    }

    print curr_time," updating Hyper Estraier:";

    my $t = time();

    my $offset = 0;
    my $added = 0;

    if ($index_node_url) {
        print " opening index $index_node_url";
        $hest_node ||= Search::Estraier::Node->new(
            url => $index_node_url,
            user => 'admin',
            passwd => 'admin',
            croak_on_error => 1,
        );
        print " via node URL";
    }

    my $results = 0;

    do {

        my $where = '';
        my @data;
        if (defined($host_id) && defined($share_id) && defined($num)) {
            $where = qq{
                WHERE
                    hosts.id = ? AND
                    shares.id = ? AND
                    files.backupnum = ?
            };
            @data = ( $host_id, $share_id, $num );
        }

        my $limit = sprintf('LIMIT '.EST_CHUNK.' OFFSET %d', $offset);

        my $sth = $dbh->prepare(qq{
            SELECT
                files.id        AS fid,
                hosts.name      AS hname,
                shares.name     AS sname,
                -- shares.share AS sharename,
                files.backupnum AS backupnum,
                -- files.name   AS filename,
                files.path      AS filepath,
                files.date      AS date,
                files.type      AS type,
                files.size      AS size,
                files.shareid   AS shareid,
                backups.date    AS backup_date
            FROM files
                INNER JOIN shares  ON files.shareID = shares.ID
                INNER JOIN hosts   ON hosts.ID = shares.hostID
                INNER JOIN backups ON backups.num = files.backupNum AND backups.hostID = hosts.ID AND backups.shareID = shares.ID
            $where
            $limit
        });

        $sth->execute(@data);
        $results = $sth->rows;

        if ($results == 0) {
            print " - no new files\n";
            return;
        } else {
            print "...";
        }

        sub fmt_date {
            my $t = shift || return;
            my $iso = BackupPC::Lib::timeStamp($t);
            $iso =~ s/\s/T/;
            return $iso;
        }

        while (my $row = $sth->fetchrow_hashref()) {

            my $uri = $row->{hname} . ':' . $row->{sname} . '#' . $row->{backupnum} . ' ' . $row->{filepath};
            if (! $skip_check && $hest_node) {
                my $id = $hest_node->uri_to_id($uri);
                next if ($id && $id == -1);
            }

            # create a document object
            my $doc = Search::Estraier::Document->new;

            # add attributes to the document object
            $doc->add_attr('@uri', $uri);

            foreach my $c (@{ $sth->{NAME} }) {
                print STDERR "attr $c = $row->{$c}\n" if ($debug > 2);
                $doc->add_attr($c, $row->{$c}) if (defined($row->{$c}));
            }

            #$doc->add_attr('@cdate', fmt_date($row->{'date'}));

            # add the body text to the document object
            my $path = $row->{'filepath'};
            $doc->add_text($path);
            $path =~ s/(.)/$1 /g;
            $doc->add_hidden_text($path);

            print STDERR $doc->dump_draft,"\n" if ($debug > 1);

            # register the document object to the database
            $hest_node->put_doc($doc) if ($hest_node);

            $added++;
        }

        print "$added";

        $offset += EST_CHUNK;

    } while ($results == EST_CHUNK);

    my $dur = (time() - $t) || 1;
    printf(" [%.2f/s dur: %s]\n",
        ( $added / $dur ),
        fmt_time($dur)
    );
}

#---- /subs ----


## update index ##
if ( ( $opt{i} || $opt{j} ) && !$opt{c} ) {
    # update all
    print "force update of Hyper Estraier index ";
    print "by -i flag" if ($opt{i});
    print "by -j flag" if ($opt{j});
    print "\n";
    hest_update();
}

## create tables ##
if ($opt{c}) {
    sub do_index {
        my $index = shift || return;
        my ($table,$col,$unique) = split(/:/, $index);
        $unique ||= '';
        $index =~ s/\W+/_/g;
        print "$index on $table($col)" . ( $unique ? "u" : "" ) . " ";
        $dbh->do(qq{ create $unique index $index on $table($col) });
    }

    print "creating tables...\n";

    $dbh->do( qq{
        create table hosts (
            ID      SERIAL          PRIMARY KEY,
            name    VARCHAR(30)     NOT NULL,
            IP      VARCHAR(15)
        );

        create table shares (
            ID      SERIAL          PRIMARY KEY,
            hostID  INTEGER         NOT NULL references hosts(id),
            name    VARCHAR(30)     NOT NULL,
            share   VARCHAR(200)    NOT NULL
        );

        create table dvds (
            ID      SERIAL          PRIMARY KEY,
            num     INTEGER         NOT NULL,
            name    VARCHAR(255)    NOT NULL,
            mjesto  VARCHAR(255)    -- mjesto is Croatian for location
        );

        create table backups (
            id          serial,
            hostID      INTEGER NOT NULL references hosts(id),
            num         INTEGER NOT NULL,
            date        integer NOT NULL,
            type        CHAR(4) not null,
            shareID     integer not null references shares(id),
            size        bigint  not null,
            inc_size    bigint  not null default -1,
            inc_deleted boolean default false,
            parts       integer not null default 0,
            PRIMARY KEY(id)
        );

        create table files (
            ID          SERIAL,
            shareID     INTEGER NOT NULL references shares(id),
            backupNum   INTEGER NOT NULL,
            name        VARCHAR(255) NOT NULL,
            path        VARCHAR(255) NOT NULL,
            date        integer NOT NULL,
            type        INTEGER NOT NULL,
            size        bigint  NOT NULL,
            primary key(id)
        );

        create table archive (
            id          serial,
            dvd_nr      int not null,
            total_size  bigint default -1,
            note        text,
            username    varchar(20) not null,
            date        timestamp default now(),
            primary key(id)
        );

        create table archive_backup (
            archive_id  int not null references archive(id) on delete cascade,
            backup_id   int not null references backups(id),
            primary key(archive_id, backup_id)
        );

        create table archive_burned (
            archive_id  int references archive(id),
            date        timestamp default now(),
            part        int not null default 1,
            copy        int not null default 1,
            iso_size    bigint default -1
        );

        create table backup_parts (
            id          serial,
            backup_id   int references backups(id),
            part_nr     int not null check (part_nr > 0),
            tar_size    bigint not null check (tar_size > 0),
            size        bigint not null check (size > 0),
            md5         text not null,
            items       int not null check (items > 0),
            date        timestamp default now(),
            primary key(id)
        );
    });

    print "creating indexes: ";

    foreach my $index (qw(
        hosts:name
        backups:hostID
        backups:num
        backups:shareID
        shares:hostID
        shares:name
        files:shareID
        files:path
        files:name
        files:date
        files:size
        archive:dvd_nr
        archive_burned:archive_id
        backup_parts:backup_id,part_nr
    )) {
        do_index($index);
    }

    print " creating sequence: ";
    foreach my $seq (qw/dvd_nr/) {
        print "$seq ";
        $dbh->do( qq{ CREATE SEQUENCE $seq } );
    }

    print " creating triggers ";
    $dbh->do( <<__END_OF_TRIGGER__ );

create or replace function backup_parts_check() returns trigger as '
declare
    b_parts integer;
    b_counted integer;
    b_id integer;
begin
    -- raise notice ''old/new parts %/% backup_id %/%'', old.parts, new.parts, old.id, new.id;
    if (TG_OP=''UPDATE'') then
        b_id := new.id;
        b_parts := new.parts;
    elsif (TG_OP = ''INSERT'') then
        b_id := new.id;
        b_parts := new.parts;
    end if;
    b_counted := (select count(*) from backup_parts where backup_id = b_id);
    -- raise notice ''backup % parts %'', b_id, b_parts;
    if ( b_parts != b_counted ) then
        raise exception ''Update of backup % aborted, requested % parts and there are really % parts'', b_id, b_parts, b_counted;
    end if;
    return null;
end;
' language plpgsql;

create trigger do_backup_parts_check
    after insert or update or delete on backups
    for each row execute procedure backup_parts_check();

create or replace function backup_backup_parts_check() returns trigger as '
declare
    b_id integer;
    my_part_nr integer;
    calc_part integer;
begin
    if (TG_OP = ''INSERT'') then
        -- raise notice ''trigger: % backup_id %'', TG_OP, new.backup_id;
        b_id = new.backup_id;
        my_part_nr = new.part_nr;
        execute ''update backups set parts = parts + 1 where id = '' || b_id;
    elsif (TG_OP = ''DELETE'') then
        -- raise notice ''trigger: % backup_id %'', TG_OP, old.backup_id;
        b_id = old.backup_id;
        my_part_nr = old.part_nr;
        execute ''update backups set parts = parts - 1 where id = '' || b_id;
    end if;
    calc_part := (select count(part_nr) from backup_parts where backup_id = b_id);
    if ( my_part_nr != calc_part ) then
        raise exception ''Update of backup_parts with backup_id % aborted, requested part_nr is % and calculated next is %'', b_id, my_part_nr, calc_part;
    end if;
    return null;
end;
' language plpgsql;

create trigger do_backup_backup_parts_check
    after insert or update or delete on backup_parts
    for each row execute procedure backup_backup_parts_check();

__END_OF_TRIGGER__

    print "...\n";

    $dbh->commit;

}

## delete data before inserting ##
if ($opt{d}) {
    print "deleting ";
    foreach my $table (qw(files dvds backups shares hosts)) {
        print "$table ";
        $dbh->do(qq{ DELETE FROM $table });
    }
    print " done...\n";

    $dbh->commit;
}

## insert new values ##

# get hosts
$hosts = $bpc->HostInfoRead();
my $hostID;
my $shareID;

my $sth;

$sth->{insert_hosts} = $dbh->prepare(qq{
    INSERT INTO hosts (name, IP) VALUES (?,?)
});

$sth->{hosts_by_name} = $dbh->prepare(qq{
    SELECT ID FROM hosts WHERE name=?
});

$sth->{backups_count} = $dbh->prepare(qq{
    SELECT COUNT(*)
    FROM backups
    WHERE hostID=? AND num=? AND shareid=?
});

$sth->{insert_backups} = $dbh->prepare(qq{
    INSERT INTO backups (hostID, num, date, type, shareid, size)
    VALUES (?,?,?,?,?,-1)
});

$sth->{update_backups_size} = $dbh->prepare(qq{
    UPDATE backups SET size = ?
    WHERE hostID = ? and num = ? and date = ? and type = ? and shareid = ?
});

$sth->{insert_files} = $dbh->prepare(qq{
    INSERT INTO files
        (shareID, backupNum, name, path, date, type, size)
    VALUES (?,?,?,?,?,?,?)
});

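# Main import loop: for every BackupPC host make sure a hosts row exists,
# read its backup list, and for each increment/share combination that is not
# yet in the backups table insert a backups row, walk the share with
# recurseDir() to insert the file rows, store the resulting size, commit, and
# finally push the new files to the full-text index.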
my @hosts = keys %{$hosts};
my $host_nr = 0;

foreach my $host_key (@hosts) {

    my $hostname = $hosts->{$host_key}->{'host'} || die "can't find host for $host_key";

    $sth->{hosts_by_name}->execute($hosts->{$host_key}->{'host'});

    unless (($hostID) = $sth->{hosts_by_name}->fetchrow_array()) {
        $sth->{insert_hosts}->execute(
            $hosts->{$host_key}->{'host'},
            $hosts->{$host_key}->{'ip'}
        );

        $hostID = $dbh->last_insert_id(undef,undef,'hosts',undef);
    }

    $host_nr++;
    # get backups for a host
    my @backups = $bpc->BackupInfoRead($hostname);
    my $incs = scalar @backups;

    my $host_header = sprintf("host %s [%d/%d]: %d increments\n",
        $hosts->{$host_key}->{'host'},
        $host_nr,
        ($#hosts + 1),
        $incs
    );
    print $host_header unless ($opt{q});

    my $inc_nr = 0;
    $beenThere = {};

    foreach my $backup (@backups) {

        $inc_nr++;
        last if ($opt{m} && $inc_nr > $opt{m});

        my $backupNum = $backup->{'num'};
        my @backupShares = ();

        my $share_header = sprintf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n",
            $hosts->{$host_key}->{'host'},
            $inc_nr, $incs, $backupNum,
            $backup->{type} || '?',
            $backup->{nFilesNew} || '?', $backup->{nFiles} || '?',
            strftime($t_fmt,localtime($backup->{startTime})),
            fmt_time($backup->{endTime} - $backup->{startTime})
        );
        print $share_header unless ($opt{q});

        my $files = BackupPC::View->new($bpc, $hostname, \@backups, 1);
        foreach my $share ($files->shareList($backupNum)) {

            my $t = time();

            $shareID = getShareID($share, $hostID, $hostname);

            $sth->{backups_count}->execute($hostID, $backupNum, $shareID);
            my ($count) = $sth->{backups_count}->fetchrow_array();
            # skip if already in database!
            next if ($count > 0);

            # dump host and share header for -q
            if ($opt{q}) {
                if ($host_header) {
                    print $host_header;
                    $host_header = undef;
                }
                print $share_header;
            }

            # dump some log
            print curr_time," ", $share;

            $sth->{insert_backups}->execute(
                $hostID,
                $backupNum,
                $backup->{'endTime'},
                substr($backup->{'type'},0,4),
                $shareID,
            );

            my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);

            eval {
                $sth->{update_backups_size}->execute(
                    $size,
                    $hostID,
                    $backupNum,
                    $backup->{'endTime'},
                    substr($backup->{'type'},0,4),
                    $shareID,
                );
                print " commit";
                $dbh->commit();
            };
            if ($@) {
                print " rollback";
                $dbh->rollback();
            }

            my $dur = (time() - $t) || 1;
            printf(" %d/%d files %d/%d dirs %0.2f MB [%.2f/s dur: %s]\n",
                $nf, $f, $nd, $d,
                ($size / 1024 / 1024),
                ( ($f+$d) / $dur ),
                fmt_time($dur)
            );

            hest_update($hostID, $shareID, $backupNum) if ($nf + $nd > 0);
        }

    }
}
undef $sth;
$dbh->commit();
$dbh->disconnect();

print "total duration: ",fmt_time(time() - $start_t),"\n";

$pidfile->remove;

sub getShareID {

    my ($share, $hostID, $hostname) = @_;

    $sth->{share_id} ||= $dbh->prepare(qq{
        SELECT ID FROM shares WHERE hostID=? AND name=?
    });

    $sth->{share_id}->execute($hostID,$share);

    my ($id) = $sth->{share_id}->fetchrow_array();

    return $id if (defined($id));

    $sth->{insert_share} ||= $dbh->prepare(qq{
        INSERT INTO shares
            (hostID,name,share)
        VALUES (?,?,?)
    });

    my $drop_down = $hostname . '/' . $share;
    $drop_down =~ s#//+#/#g;

    $sth->{insert_share}->execute($hostID, $share, $drop_down);
    return $dbh->last_insert_id(undef,undef,'shares',undef);
}

sub found_in_db {

    my @data = @_;
    shift @data;

    my ($key, $shareID, undef, $name, $path, $date, undef, $size) = @_;

    return $beenThere->{$key} if (defined($beenThere->{$key}));

    $sth->{file_in_db} ||= $dbh->prepare(qq{
        SELECT 1 FROM files
        WHERE shareID = ? and
            path = ? and
            size = ? and
            ( date = ? or date = ? or date = ? )
        LIMIT 1
    });

    my @param = ($shareID, $path, $size, $date, $date - $dst_offset, $date + $dst_offset);
    $sth->{file_in_db}->execute(@param);
    my $rows = $sth->{file_in_db}->rows;
    print STDERR "## found_in_db($shareID,$path,$date,$size) ",( $rows ? '+' : '-' ), join(" ",@param), "\n" if ($debug >= 3);

    $beenThere->{$key}++;

    $sth->{'insert_files'}->execute(@data) unless ($rows);
    return $rows;
}

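# recurseDir() processes one directory at a time: entries of the current
# directory are inserted first, subdirectories are collected on a local stack
# and then handled by recursive calls. The $beenThere cache plus the
# +/- $dst_offset key variants keep files from being counted again when their
# mtime only shifted by a daylight-saving hour.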
####################################################
# recurse through the filesystem structure and     #
# return a flattened list of files                 #
####################################################
sub recurseDir($$$$$$$) {

    my ($bpc, $hostname, $files, $backupNum, $share, $dir, $shareID) = @_;

    print STDERR "\nrecurse($hostname,$backupNum,$share,$dir,$shareID)\n" if ($debug >= 1);

    my ($nr_files, $new_files, $nr_dirs, $new_dirs, $size) = (0,0,0,0,0);

    { # scope
        my @stack;

        print STDERR "# dirAttrib($backupNum, $share, $dir)\n" if ($debug >= 2);
        my $filesInBackup = $files->dirAttrib($backupNum, $share, $dir);

        # first, add all the entries in current directory
        foreach my $path_key (keys %{$filesInBackup}) {
            print STDERR "# file ",Dumper($filesInBackup->{$path_key}),"\n" if ($debug >= 3);
            my @data = (
                $shareID,
                $backupNum,
                $path_key,
                $filesInBackup->{$path_key}->{'relPath'},
                $filesInBackup->{$path_key}->{'mtime'},
                $filesInBackup->{$path_key}->{'type'},
                $filesInBackup->{$path_key}->{'size'}
            );

            my $key = join(" ", (
                $shareID,
                $dir,
                $path_key,
                $filesInBackup->{$path_key}->{'mtime'},
                $filesInBackup->{$path_key}->{'size'}
            ));

            my $key_dst_prev = join(" ", (
                $shareID,
                $dir,
                $path_key,
                $filesInBackup->{$path_key}->{'mtime'} - $dst_offset,
                $filesInBackup->{$path_key}->{'size'}
            ));

            my $key_dst_next = join(" ", (
                $shareID,
                $dir,
                $path_key,
                $filesInBackup->{$path_key}->{'mtime'} + $dst_offset,
                $filesInBackup->{$path_key}->{'size'}
            ));

            my $found;
            if (
                ! defined($beenThere->{$key}) &&
                ! defined($beenThere->{$key_dst_prev}) &&
                ! defined($beenThere->{$key_dst_next}) &&
                ! ($found = found_in_db($key, @data))
            ) {
                print STDERR "# key: $key [", $beenThere->{$key},"]" if ($debug >= 2);

                if ($filesInBackup->{$path_key}->{'type'} == BPC_FTYPE_DIR) {
                    $new_dirs++ unless ($found);
                    print STDERR " dir\n" if ($debug >= 2);
                } else {
                    $new_files++ unless ($found);
                    print STDERR " file\n" if ($debug >= 2);
                }
                $size += $filesInBackup->{$path_key}->{'size'} || 0;
            }

            if ($filesInBackup->{$path_key}->{'type'} == BPC_FTYPE_DIR) {
                $nr_dirs++;

                my $full_path = $dir . '/' . $path_key;
                push @stack, $full_path;
                print STDERR "### store to stack: $full_path\n" if ($debug >= 3);

                # my ($f,$nf,$d,$nd) = recurseDir($bpc, $hostname, $backups, $backupNum, $share, $path_key, $shareID) unless ($beenThere->{$key});
                #
                # $nr_files += $f;
                # $new_files += $nf;
                # $nr_dirs += $d;
                # $new_dirs += $nd;

            } else {
                $nr_files++;
            }
        }

        print STDERR "## STACK ",join(", ", @stack),"\n" if ($debug >= 2);

        while ( my $dir = shift @stack ) {
            my ($f,$nf,$d,$nd, $s) = recurseDir($bpc, $hostname, $files, $backupNum, $share, $dir, $shareID);
            print STDERR "# $dir f: $f nf: $nf d: $d nd: $nd\n" if ($debug >= 1);
            $nr_files += $f;
            $new_files += $nf;
            $nr_dirs += $d;
            $new_dirs += $nd;
            $size += $s;
        }
    }

    return ($nr_files, $new_files, $nr_dirs, $new_dirs, $size);
}


