/[BackupPC]/trunk/bin/BackupPC_tarIncCreate
This is a repository of my old source code, which isn't updated any more. Go to git.rot13.org for current projects!

Contents of /trunk/bin/BackupPC_tarIncCreate



Revision 329
Tue Jan 31 22:04:47 2006 UTC by dpavlin
File size: 23648 byte(s)
 r9171@llin:  dpavlin | 2006-01-31 23:04:30 +0100
 removed all direct updates of parts, which are now handled by custom referential triggers on backups and backup_parts (which also check part_nr)

#!/usr/bin/perl -w
#============================================================= -*-perl-*-
#
# BackupPC_tarIncCreate: create a tar archive of an existing incremental dump
#
#
# DESCRIPTION
#
#   Usage: BackupPC_tarIncCreate [options]
#
#   Flags:
#     Required options:
#
#       -h host         Host from which the tar archive is created.
#       -n dumpNum      Dump number from which the tar archive is created.
#                       A negative number means relative to the end (eg -1
#                       means the most recent dump, -2 2nd most recent etc).
#       -s shareName    Share name from which the tar archive is created.
#
#     Other options:
#       -t              print summary totals
#       -r pathRemove   path prefix that will be replaced with pathAdd
#       -p pathAdd      new path prefix
#       -b BLOCKS       BLOCKS x 512 bytes per record (default 20; same as tar)
#       -w writeBufSz   write buffer size (default 1MB)
#
#   The -h, -n and -s options specify which dump is used to generate
#   the tar archive.  The -r and -p options can be used to relocate
#   the paths in the tar archive so extracted files can be placed
#   in a location different from their original location.
#
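#   For example (an illustrative invocation; the host and share names
#   are hypothetical), the following writes the most recent increment
#   of share /home on host pc1 as gzipped tar part(s) into GzipTempDir,
#   printing summary totals:
#
#       BackupPC_tarIncCreate -h pc1 -n -1 -s /home -t
#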
# AUTHOR
#   Craig Barratt  <cbarratt@users.sourceforge.net>
#   Ivan Klaric <iklaric@gmail.com>
#   Dobrica Pavlinusic <dpavlin@rot13.org>
#
# COPYRIGHT
#   Copyright (C) 2001-2003  Craig Barratt
#
#   This program is free software; you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation; either version 2 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program; if not, write to the Free Software
#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#========================================================================
#
# Version 2.1.0, released 20 Jun 2004.
#
# See http://backuppc.sourceforge.net.
#
#========================================================================

use strict;
no utf8;
use lib "__INSTALLDIR__/lib";
use File::Path;
use Getopt::Std;
use DBI;
use BackupPC::Lib;
use BackupPC::Attrib qw(:all);
use BackupPC::FileZIO;
use BackupPC::View;
use BackupPC::SearchLib;
use Time::HiRes qw/time/;
use POSIX qw/strftime/;
use File::Which;
use File::Slurp;
use Data::Dumper;      ### FIXME

die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
my $TopDir = $bpc->TopDir();
my $BinDir = $bpc->BinDir();
my %Conf   = $bpc->Conf();
%BackupPC::SearchLib::Conf = %Conf;
my %opts;
my $in_backup_increment;


if ( !getopts("th:n:p:r:s:b:w:vdf", \%opts) ) {
    print STDERR <<EOF;
usage: $0 [options]
  Required options:
     -h host         host from which the tar archive is created
     -n dumpNum      dump number from which the tar archive is created
                     A negative number means relative to the end (eg -1
                     means the most recent dump, -2 2nd most recent etc).
     -s shareName    share name from which the tar archive is created

  Other options:
     -t              print summary totals
     -r pathRemove   path prefix that will be replaced with pathAdd
     -p pathAdd      new path prefix
     -b BLOCKS       BLOCKS x 512 bytes per record (default 20; same as tar)
     -w writeBufSz   write buffer size (default 1048576 = 1MB)
     -f              overwrite existing parts
     -v              verbose output
     -d              debug output
EOF
    exit(1);
}

if ( $opts{h} !~ /^([\w\.\s-]+)$/ ) {
    die "$0: bad host name '$opts{h}'\n";
}
my $Host = $opts{h};

if ( $opts{n} !~ /^(-?\d+)$/ ) {
    die "$0: bad dump number '$opts{n}'\n";
}
my $Num = $opts{n};

my $bin;
foreach my $c (qw/gzip md5sum tee/) {
    $bin->{$c} = which($c) || die "$0 needs $c, install it\n";
}

my @Backups = $bpc->BackupInfoRead($Host);
my $FileCnt = 0;
my $ByteCnt = 0;
my $DirCnt = 0;
my $SpecialCnt = 0;
my $ErrorCnt = 0;
my $current_tar_size = 0;
my $total_increment_size = 0;

my $i;
$Num = $Backups[@Backups + $Num]{num} if ( -@Backups <= $Num && $Num < 0 );
for ( $i = 0 ; $i < @Backups ; $i++ ) {
    last if ( $Backups[$i]{num} == $Num );
}
if ( $i >= @Backups ) {
    die "$0: bad backup number $Num for host $Host\n";
}

# note: "my $x = $1 if (...)" leaves $x in an undefined state when the
# condition is false, so declare first and assign conditionally
my ($PathRemove, $PathAdd);
$PathRemove = $1 if ( defined($opts{r}) && $opts{r} =~ /(.+)/ );
$PathAdd    = $1 if ( defined($opts{p}) && $opts{p} =~ /(.+)/ );
if ( $opts{s} !~ /^([\w\s\.\/\$-]+)$/ && $opts{s} ne "*" ) {
    die "$0: bad share name '$opts{s}'\n";
}
our $ShareName = $opts{s};
our $view = BackupPC::View->new($bpc, $Host, \@Backups);

# database

my $dsn     = $Conf{SearchDSN};
my $db_user = $Conf{SearchUser} || '';

my $dbh = DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 0 } );

my $sth_inc_size = $dbh->prepare(qq{
    update backups set
        inc_size = ?,
        inc_deleted = false
    where id = ?
});
my $sth_backup_parts = $dbh->prepare(qq{
    insert into backup_parts (
        backup_id,
        part_nr,
        tar_size,
        size,
        md5,
        items
    ) values (?,?,?,?,?,?)
});

#
# This constant and the line of code below that uses it are borrowed
# from Archive::Tar.  Thanks to Calle Dybedahl and Stephen Zander.
# See www.cpan.org.
#
# Archive::Tar is Copyright 1997 Calle Dybedahl. All rights reserved.
# Copyright 1998 Stephen Zander. All rights reserved.
#
my $tar_pack_header
    = 'a100 a8 a8 a8 a12 a12 A8 a1 a100 a6 a2 a32 a32 a8 a8 a155 x12';
my $tar_header_length = 512;
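#
# For reference, that pack template follows the 512-byte ustar header
# layout:
#
#   a100 name      a8 mode      a8 uid        a8 gid       a12 size
#   a12  mtime     A8 chksum    a1 typeflag   a100 linkname
#   a6   magic     a2 version   a32 uname     a32 gname
#   a8   devmajor  a8 devminor  a155 prefix   x12 padding
#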

my $BufSize    = $opts{w} || 1048576;     # 1MB or 2^20
my $WriteBuf   = "";
my $WriteBufSz = ($opts{b} || 20) * $tar_header_length;

my(%UidCache, %GidCache);
my(%HardLinkExtraFiles, @HardLinks);

#
# Write out all the requested files/directories
#

my $max_file_size = $Conf{'MaxArchiveFileSize'} || die "problem with MaxArchiveFileSize parameter";

my $tar_dir = $Conf{InstallDir} . '/' . $Conf{GzipTempDir};
die "problem with $tar_dir, check GzipTempDir in configuration\n" unless (-d $tar_dir && -w $tar_dir);

my $tar_file = BackupPC::SearchLib::getGzipName($Host, $ShareName, $Num) || die "can't getGzipName($Host, $ShareName, $Num)";

my $tar_path_final = $tar_dir . '/' . $tar_file;
my $tar_path = $tar_path_final . '.tmp';

$tar_path =~ s#//#/#g;

my $sth = $dbh->prepare(qq{
    SELECT
        backups.id
    FROM backups
    JOIN shares on shares.id = shareid
    JOIN hosts  on hosts.id  = shares.hostid
    WHERE hosts.name = ? and shares.name = ? and backups.num = ?
});
$sth->execute($Host, $ShareName, $Num);
my ($backup_id) = $sth->fetchrow_array;
$sth->finish;


# delete existing backup_parts
my $sth_delete_backup_parts = $dbh->prepare(qq{
    delete from backup_parts
    where backup_id = ?
});
$sth_delete_backup_parts->execute($backup_id);


print STDERR "backup_id: $backup_id working dir: $tar_dir, max uncompressed size $max_file_size bytes, tar $tar_file\n" if ($opts{d});

if (-e $tar_path_final) {
    if ($opts{f}) {
        # note: with "rmtree $path || die" the || binds to the argument,
        # so the die would never fire; use parens and "or" instead
        rmtree($tar_path_final) or die "can't remove $tar_path_final: $!";
    } else {
        die "$tar_path_final already exists\n";
    }
}

my $fh;
my $part = 0;
my $no_files = 0;
my $items_in_part = 0;

sub new_tar_part {
    my $arg = {@_};

    if ($fh) {
        return if ($current_tar_size == 0);

        print STDERR "\n\t+ $part:";

        #
        # Finish with two null 512 byte headers,
        # and then round out a full block.
        #
        my $data = "\0" x ($tar_header_length * 2);
        TarWrite($fh, \$data);
        TarWrite($fh, undef);

        close($fh) || die "can't close archive part $part: $!";

        my $file = $tar_path . '/' . $part;

        my $md5 = read_file( $file . '.md5' ) || die "can't read md5sum file ${file}.md5";
        $md5 =~ s/\s.*$//;

        my $size = (stat( $file . '.tar.gz' ))[7] || die "can't stat ${file}.tar.gz";

        print "$file, $size bytes, $items_in_part items";

        $sth_backup_parts->execute(
            $backup_id,
            $part,
            $current_tar_size,
            $size,
            $md5,
            $items_in_part,
        );

        $total_increment_size += $size;

        if ($arg->{close}) {

            sub move($$) {
                my ($from, $to) = @_;
                print STDERR "# rename $from -> $to\n" if ($opts{d});
                # "rename $from, $to || die" would bind || to $to; use "or"
                rename($from, $to) or die "can't move $from -> $to: $!\n";
            }

            if ($part == 1) {
                print STDERR " single" if ($opts{v});
                move("${tar_path}/1.tar.gz", "${tar_path_final}.tar.gz");
                move("${tar_path}/1.md5",    "${tar_path_final}.md5");
                rmtree $tar_path or die "can't remove temporary dir $tar_path: $!";
            } else {
                print STDERR " [last]" if ($opts{v});
                move("${tar_path}", "${tar_path_final}");

                # if this archive was previously created as a single
                # part, remove its leftover files
                foreach my $suffix (qw/.tar.gz .md5/) {
                    my $path = $tar_path_final . $suffix;
                    unlink $path if (-e $path);
                }
            }

            $sth_inc_size->execute(
                $total_increment_size,
                $backup_id
            );

            print "\n\ttotal $total_increment_size bytes";

            return;
        }

    }

    $part++;

    # if this is the first part, create the working directory
    # and install cleanup handlers

    if ($part == 1) {
        if (-e $tar_path) {
            print STDERR "# deleting existing $tar_path\n" if ($opts{d});
            rmtree($tar_path);
        }
        mkdir($tar_path) || die "can't create directory $tar_path: $!";

        sub abort_cleanup {
            print STDERR "ABORTED: cleanup temp dir ";
            rmtree($tar_path);
            $dbh->rollback;
            exit 1;
        }

        $SIG{'INT'}     = \&abort_cleanup;
        $SIG{'QUIT'}    = \&abort_cleanup;
        $SIG{'__DIE__'} = \&abort_cleanup;

    }

    my $file = $tar_path . '/' . $part;

    #
    # create a complex pipe which passes the output through gzip
    # for compression, writes it to disk using tee,
    # and pipes the same output to md5sum to create a checksum
    #

    my $cmd = '| ' . $bin->{'gzip'}   . ' ' . $Conf{GzipLevel} . ' ' .
              '| ' . $bin->{'tee'}    . ' ' . $file . '.tar.gz' . ' ' .
              '| ' . $bin->{'md5sum'} . ' - > ' . $file . '.md5';
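
    # For the first part this expands to something like (illustrative
    # only; binary paths and $Conf{GzipLevel} depend on the local
    # installation):
    #
    #   | /bin/gzip -9 | /usr/bin/tee /path/to/tmp/1.tar.gz \
    #   | /usr/bin/md5sum - > /path/to/tmp/1.md5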

    print STDERR "## $cmd\n" if ($opts{d});

    open($fh, $cmd) or die "can't open $cmd: $!";
    binmode($fh);

    $current_tar_size = 0;
    $items_in_part = 0;
}

new_tar_part();

if (seedCache($Host, $ShareName, $Num)) {
    archiveWrite($fh, '/');
    archiveWriteHardLinks($fh);
    new_tar_part( close => 1 );
} else {
    print STDERR "NOTE: no files found for $Host:$ShareName, increment $Num\n" if ($opts{v});
    # remove temporary files if there are no files
    rmtree($tar_path);

    my $sth = $dbh->prepare(qq{
        update backups set inc_size = 0, inc_deleted = true
        where id = ?
    });
    $sth->execute($backup_id);

}

#
# print out totals if requested
#
if ( $opts{t} ) {
    print STDERR "Done: $FileCnt files, $ByteCnt bytes, $DirCnt dirs,",
                 " $SpecialCnt specials, $ErrorCnt errors\n";
}
if ( $ErrorCnt && !$FileCnt && !$DirCnt ) {
    #
    # Got errors, with no files or directories; exit with non-zero
    # status
    #
    die "got errors or no files\n";
}

$sth_inc_size->finish;
$sth_backup_parts->finish;

$dbh->commit || die "can't commit changes to database";
$dbh->disconnect();

exit;

###########################################################################
# Subroutines
###########################################################################

sub archiveWrite
{
    my($fh, $dir, $tarPathOverride) = @_;

    if ( $dir =~ m{(^|/)\.\.(/|$)} ) {
        print(STDERR "$0: bad directory '$dir'\n");
        $ErrorCnt++;
        return;
    }
    $dir = "/" if ( $dir eq "." );
    #print(STDERR "calling find with $Num, $ShareName, $dir\n");

    if ( $view->find($Num, $ShareName, $dir, 0, \&TarWriteFile,
                     $fh, $tarPathOverride) < 0 ) {
        print(STDERR "$0: bad share or directory '$ShareName/$dir'\n");
        $ErrorCnt++;
        return;
    }
}

#
# Write out any saved hardlinks
#
sub archiveWriteHardLinks
{
    # note: "my $fh = @_" would assign the argument count, not the
    # filehandle, so unpack the argument list properly
    my ($fh) = @_;
    foreach my $hdr ( @HardLinks ) {
        $hdr->{size} = 0;
        if ( defined($PathRemove)
              && substr($hdr->{linkname}, 0, length($PathRemove)+1)
                        eq ".$PathRemove" ) {
            substr($hdr->{linkname}, 0, length($PathRemove)+1) = ".$PathAdd";
        }
        TarWriteFileInfo($fh, $hdr);
    }
    @HardLinks = ();
    %HardLinkExtraFiles = ();
}

sub UidLookup
{
    my($uid) = @_;

    $UidCache{$uid} = (getpwuid($uid))[0] if ( !exists($UidCache{$uid}) );
    return $UidCache{$uid};
}

sub GidLookup
{
    my($gid) = @_;

    $GidCache{$gid} = (getgrgid($gid))[0] if ( !exists($GidCache{$gid}) );
    return $GidCache{$gid};
}

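#
# Buffered tar output: TarWrite() appends $$dataRef to an in-memory
# buffer and flushes it to the pipe in $WriteBufSz chunks; calling it
# with an undef data ref pads out to a full buffer with NULs and
# flushes, which is how new_tar_part() finishes an archive part.
# $current_tar_size tracks the uncompressed bytes written so far.
#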
sub TarWrite
{
    my($fh, $dataRef) = @_;

    if ( !defined($dataRef) ) {
        #
        # do flush by padding to a full $WriteBufSz
        #
        my $data = "\0" x ($WriteBufSz - length($WriteBuf));
        $dataRef = \$data;
    }

    # poor man's tell :-)
    $current_tar_size += length($$dataRef);

    if ( length($WriteBuf) + length($$dataRef) < $WriteBufSz ) {
        #
        # just buffer and return
        #
        $WriteBuf .= $$dataRef;
        return;
    }
    my $done = $WriteBufSz - length($WriteBuf);
    if ( syswrite($fh, $WriteBuf . substr($$dataRef, 0, $done))
                            != $WriteBufSz ) {
        die "Unable to write to output file ($!)\n";
    }
    while ( $done + $WriteBufSz <= length($$dataRef) ) {
        if ( syswrite($fh, substr($$dataRef, $done, $WriteBufSz))
                            != $WriteBufSz ) {
            die "Unable to write to output file ($!)\n";
        }
        $done += $WriteBufSz;
    }
    $WriteBuf = substr($$dataRef, $done);
}

sub TarWritePad
{
    my($fh, $size) = @_;

    if ( $size % $tar_header_length ) {
        my $data = "\0" x ($tar_header_length - ($size % $tar_header_length));
        TarWrite($fh, \$data);
    }
}

sub TarWriteHeader
{
    my($fh, $hdr) = @_;

    $hdr->{uname} = UidLookup($hdr->{uid}) if ( !defined($hdr->{uname}) );
    $hdr->{gname} = GidLookup($hdr->{gid}) if ( !defined($hdr->{gname}) );
    my $devmajor = defined($hdr->{devmajor}) ? sprintf("%07o", $hdr->{devmajor})
                                             : "";
    my $devminor = defined($hdr->{devminor}) ? sprintf("%07o", $hdr->{devminor})
                                             : "";
    my $sizeStr;
    if ( $hdr->{size} >= 2 * 65536 * 65536 ) {
        #
        # GNU extension for files >= 8GB: send size in big-endian binary
        #
        $sizeStr = pack("c4 N N", 0x80, 0, 0, 0,
                                  $hdr->{size} / (65536 * 65536),
                                  $hdr->{size} % (65536 * 65536));
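        #
        # e.g. a (hypothetical) 10 GB file, 10737418240 bytes, becomes
        # the flag byte 0x80 followed by the big-endian words
        # (10737418240 / 2^32, 10737418240 % 2^32) = (2, 2147483648)
        #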
    } elsif ( $hdr->{size} >= 1 * 65536 * 65536 ) {
        #
        # sprintf octal only handles up to 2^32 - 1
        #
        $sizeStr = sprintf("%03o", $hdr->{size} / (1 << 24))
                 . sprintf("%08o", $hdr->{size} % (1 << 24));
    } else {
        $sizeStr = sprintf("%011o", $hdr->{size});
    }
    my $data = pack($tar_pack_header,
                    substr($hdr->{name}, 0, 99),
                    sprintf("%07o", $hdr->{mode}),
                    sprintf("%07o", $hdr->{uid}),
                    sprintf("%07o", $hdr->{gid}),
                    $sizeStr,
                    sprintf("%011o", $hdr->{mtime}),
                    "",        #checksum field - space padded by pack("A8")
                    $hdr->{type},
                    substr($hdr->{linkname}, 0, 99),
                    $hdr->{magic} || 'ustar ',
                    $hdr->{version} || ' ',
                    $hdr->{uname},
                    $hdr->{gname},
                    $devmajor,
                    $devminor,
                    ""         # prefix is empty
            );
    substr($data, 148, 7) = sprintf("%06o\0", unpack("%16C*", $data));
    TarWrite($fh, \$data);
}

sub TarWriteFileInfo
{
    my($fh, $hdr) = @_;

    #
    # Handle long link names (symbolic links)
    #
    if ( length($hdr->{linkname}) > 99 ) {
        my %h;
        my $data = $hdr->{linkname} . "\0";
        $h{name} = "././\@LongLink";
        $h{type} = "K";
        $h{size} = length($data);
        TarWriteHeader($fh, \%h);
        TarWrite($fh, \$data);
        TarWritePad($fh, length($data));
    }
    #
    # Handle long file names
    #
    if ( length($hdr->{name}) > 99 ) {
        my %h;
        my $data = $hdr->{name} . "\0";
        $h{name} = "././\@LongLink";
        $h{type} = "L";
        $h{size} = length($data);
        TarWriteHeader($fh, \%h);
        TarWrite($fh, \$data);
        TarWritePad($fh, length($data));
    }
    TarWriteHeader($fh, $hdr);
}

#
# seed cache of files in this increment
#
sub seedCache($$$) {
    my ($host, $share, $dumpNo) = @_;

    print STDERR curr_time(), " $host:$share #$dumpNo" if ($opts{v});
    my $sql = q{
        SELECT path,size
        FROM files
        JOIN shares on shares.id = shareid
        JOIN hosts  on hosts.id  = shares.hostid
        WHERE hosts.name = ? and shares.name = ? and backupnum = ?
    };

    my $sth = $dbh->prepare($sql);
    $sth->execute($host, $share, $dumpNo);
    my $count = $sth->rows;
    print STDERR " $count items, parts:" if ($opts{v});
    while (my $row = $sth->fetchrow_arrayref) {
        #print STDERR "+ ", $row->[0],"\n";
        $in_backup_increment->{ $row->[0] } = $row->[1];
    }

    $sth->finish();

    return $count;
}
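
# After seeding, $in_backup_increment maps each path in this increment
# to its size, e.g. (hypothetical entry):
#
#   $in_backup_increment->{'/etc/passwd'} = 1234;
#
# TarWriteFile() uses it both as a membership test (files not in the
# increment are skipped) and to decide when to start a new part.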

#
# calculate overhead for one file in a tar archive
#
sub tar_overhead($) {
    my $name = shift || '';

    # header, padding of file and two null blocks at end
    my $len = 4 * $tar_header_length;

    # if the filename is longer than 99 chars, add blocks for the
    # long filename header
    if ( length($name) > 99 ) {
        $len += int( ( length($name) + $tar_header_length ) / $tar_header_length ) * $tar_header_length;
    }

    return $len;
}
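
# For example, a name that fits the 100-byte header field costs
# 4 * 512 = 2048 bytes of overhead; a (hypothetical) 150-character name
# adds int((150 + 512) / 512) * 512 = 512 more bytes for the @LongLink
# record, 2560 bytes in total.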

my $Attr;
my $AttrDir;

sub TarWriteFile
{
    my($hdr, $fh, $tarPathOverride) = @_;

    my $tarPath = $hdr->{relPath};
    $tarPath = $tarPathOverride if ( defined($tarPathOverride) );

    $tarPath =~ s{//+}{/}g;

    #print STDERR "? $tarPath\n" if ($opts{d});
    my $size = $in_backup_increment->{$tarPath};
    return unless (defined($size));

    # is this file too large to fit into MaxArchiveFileSize?

    if ( ($current_tar_size + tar_overhead($tarPath) + $size) > $max_file_size ) {
        print STDERR "# tar file $current_tar_size + ", tar_overhead($tarPath), " + $size > $max_file_size, splitting\n" if ($opts{d});
        new_tar_part();
    }

    #print STDERR "A $tarPath [$size] tell: $current_tar_size\n" if ($opts{d});
    $items_in_part++;

    if ( defined($PathRemove)
            && substr($tarPath, 0, length($PathRemove)) eq $PathRemove ) {
        substr($tarPath, 0, length($PathRemove)) = $PathAdd;
    }
    $tarPath = "./" . $tarPath if ( $tarPath !~ /^\.\// );
    $tarPath =~ s{//+}{/}g;
    $hdr->{name} = $tarPath;

    if ( $hdr->{type} == BPC_FTYPE_DIR ) {
        #
        # Directory: just write the header
        #
        $hdr->{name} .= "/" if ( $hdr->{name} !~ m{/$} );
        TarWriteFileInfo($fh, $hdr);
        $DirCnt++;
    } elsif ( $hdr->{type} == BPC_FTYPE_FILE ) {
        #
        # Regular file: write the header and file
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        if ( !defined($f) ) {
            print(STDERR "Unable to open file $hdr->{fullPath}\n");
            $ErrorCnt++;
            return;
        }
        # do we need to split the file?
        if ($hdr->{size} < $max_file_size) {
            TarWriteFileInfo($fh, $hdr);
            my($data, $size) = ('', 0);
            while ( $f->read(\$data, $BufSize) > 0 ) {
                TarWrite($fh, \$data);
                $size += length($data);
            }
            $f->close;
            TarWritePad($fh, $size);
            $FileCnt++;
            $ByteCnt += $size;
        } else {
            my $full_size = $hdr->{size};
            my $orig_name = $hdr->{name};
            my $max_part_size = $max_file_size - tar_overhead($hdr->{name});

            my $parts = int(($full_size + $max_part_size - 1) / $max_part_size);
            print STDERR "# splitting $orig_name [$full_size bytes] into $parts parts\n" if ($opts{d});
            foreach my $subpart ( 1 .. $parts ) {
                new_tar_part();
                if ($subpart < $parts) {
                    $hdr->{size} = $max_part_size;
                } else {
                    # the last part gets whatever is left; this also works
                    # when $full_size is an exact multiple of $max_part_size
                    # (a plain % would yield a zero-sized last part)
                    $hdr->{size} = $full_size - ($parts - 1) * $max_part_size;
                }
                $hdr->{name} = $orig_name . '/' . $subpart;
                print STDERR "## creating part $subpart ", $hdr->{name}, " [", $hdr->{size}, " bytes]\n";

                TarWriteFileInfo($fh, $hdr);
                my($data, $size) = ('', 0);
                # read the full buffers of this part, then the remainder
                # (an earlier revision had this loop disabled with if (0),
                # which left most of each part unwritten)
                for ( 1 .. int($hdr->{size} / $BufSize) ) {
                    my $r_size = $f->read(\$data, $BufSize);
                    die "expected $BufSize bytes read, got $r_size bytes!" if ($r_size != $BufSize);
                    TarWrite($fh, \$data);
                    $size += length($data);
                }
                my $size_left = $hdr->{size} % $BufSize;
                if ($size_left > 0) {
                    my $r_size = $f->read(\$data, $size_left);
                    die "expected $size_left bytes last read, got $r_size bytes!" if ($r_size != $size_left);
                    TarWrite($fh, \$data);
                    $size += length($data);
                }
                TarWritePad($fh, $size);

                $items_in_part++;
            }
            $f->close;
            $FileCnt++;
            $ByteCnt += $full_size;
            new_tar_part();
        }
    } elsif ( $hdr->{type} == BPC_FTYPE_HARDLINK ) {
        #
        # Hardlink file: either write a hardlink or the complete file
        # depending upon whether the linked-to file will be written
        # to the archive.
        #
        # Start by reading the contents of the link.
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        if ( !defined($f) ) {
            print(STDERR "Unable to open file $hdr->{fullPath}\n");
            $ErrorCnt++;
            return;
        }
        my $data;
        while ( $f->read(\$data, $BufSize) > 0 ) {
            $hdr->{linkname} .= $data;
        }
        $f->close;
        my $done = 0;
        my $name = $hdr->{linkname};
        $name =~ s{^\./}{/};
        if ( $HardLinkExtraFiles{$name} ) {
            #
            # Target file will be or was written, so just remember
            # the hardlink so we can dump it later.
            #
            push(@HardLinks, $hdr);
            $SpecialCnt++;
        } else {
            #
            # Have to dump the original file.  Just call the top-level
            # routine, so that we save the hassle of dealing with
            # mangling, merging and attributes.
            #
            $HardLinkExtraFiles{$hdr->{linkname}} = 1;
            archiveWrite($fh, $hdr->{linkname}, $hdr->{name});
        }
    } elsif ( $hdr->{type} == BPC_FTYPE_SYMLINK ) {
        #
        # Symbolic link: read the symbolic link contents into the header
        # and write the header.
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        if ( !defined($f) ) {
            print(STDERR "Unable to open symlink file $hdr->{fullPath}\n");
            $ErrorCnt++;
            return;
        }
        my $data;
        while ( $f->read(\$data, $BufSize) > 0 ) {
            $hdr->{linkname} .= $data;
        }
        $f->close;
        $hdr->{size} = 0;
        TarWriteFileInfo($fh, $hdr);
        $SpecialCnt++;
    } elsif ( $hdr->{type} == BPC_FTYPE_CHARDEV
           || $hdr->{type} == BPC_FTYPE_BLOCKDEV
           || $hdr->{type} == BPC_FTYPE_FIFO ) {
        #
        # Special files: for char and block special we read the
        # major and minor numbers from a plain file.
        #
        if ( $hdr->{type} != BPC_FTYPE_FIFO ) {
            my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0,
                                            $hdr->{compress});
            my $data;
            if ( !defined($f) || $f->read(\$data, $BufSize) < 0 ) {
                print(STDERR "Unable to open/read char/block special file"
                           . " $hdr->{fullPath}\n");
                $f->close if ( defined($f) );
                $ErrorCnt++;
                return;
            }
            $f->close;
            if ( $data =~ /(\d+),(\d+)/ ) {
                $hdr->{devmajor} = $1;
                $hdr->{devminor} = $2;
            }
        }
        $hdr->{size} = 0;
        TarWriteFileInfo($fh, $hdr);
        $SpecialCnt++;
    } else {
        print(STDERR "Got unknown type $hdr->{type} for $hdr->{name}\n");
        $ErrorCnt++;
    }
}

my $t_fmt = '%Y-%m-%d %H:%M:%S';
sub curr_time {
    return strftime($t_fmt, localtime());
}

Properties

Name            Value
svn:executable  *
