/[BackupPC]/trunk/bin/BackupPC_tarIncCreate
This is a repository of my old source code, which isn't updated any more. Go to git.rot13.org for current projects!

Contents of /trunk/bin/BackupPC_tarIncCreate



Revision 354
Thu Apr 27 09:03:34 2006 UTC (18 years ago) by dpavlin
File size: 23632 byte(s)
 r596@athlon:  dpavlin | 2006-04-27 11:03:30 +0200
 removed double use of File::Path

#!/usr/bin/perl -w
#============================================================= -*-perl-*-
#
# BackupPC_tarIncCreate: create a tar archive of an existing incremental dump
#
#
# DESCRIPTION
#
#   Usage: BackupPC_tarIncCreate [options]
#
#   Flags:
#     Required options:
#
#       -h host         Host from which the tar archive is created.
#       -n dumpNum      Dump number from which the tar archive is created.
#                       A negative number means relative to the end (eg -1
#                       means the most recent dump, -2 2nd most recent etc).
#       -s shareName    Share name from which the tar archive is created.
#
#     Other options:
#       -t              print summary totals
#       -r pathRemove   path prefix that will be replaced with pathAdd
#       -p pathAdd      new path prefix
#       -b BLOCKS       BLOCKS x 512 bytes per record (default 20; same as tar)
#       -w writeBufSz   write buffer size (default 1MB)
#
#   The -h, -n and -s options specify which dump is used to generate
#   the tar archive.  The -r and -p options can be used to relocate
#   the paths in the tar archive so extracted files can be placed
#   in a location different from their original location.
#
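#   For example (hypothetical host and share names), to rebuild the
#   increment archive for the most recent dump of share /home on host foo:
#
#       BackupPC_tarIncCreate -h foo -n -1 -s /home -t
#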
# AUTHOR
#   Craig Barratt  <cbarratt@users.sourceforge.net>
#   Ivan Klaric <iklaric@gmail.com>
#   Dobrica Pavlinusic <dpavlin@rot13.org>
#
# COPYRIGHT
#   Copyright (C) 2001-2003  Craig Barratt
#
#   This program is free software; you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation; either version 2 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program; if not, write to the Free Software
#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#========================================================================
#
# Version 2.1.0, released 20 Jun 2004.
#
# See http://backuppc.sourceforge.net.
#
#========================================================================

use strict;
no utf8;
use lib "__INSTALLDIR__/lib";
use File::Path;
use Getopt::Std;
use DBI;
use BackupPC::Lib;
use BackupPC::Attrib qw(:all);
use BackupPC::FileZIO;
use BackupPC::View;
use BackupPC::SearchLib;
use Time::HiRes qw/time/;
use POSIX qw/strftime/;
use File::Which;
use File::Slurp;
use Data::Dumper;	### FIXME

die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
my $TopDir = $bpc->TopDir();
my $BinDir = $bpc->BinDir();
my %Conf   = $bpc->Conf();
%BackupPC::SearchLib::Conf = %Conf;
my %opts;
my $in_backup_increment;


if ( !getopts("th:n:p:r:s:b:w:vdf", \%opts) ) {
    print STDERR <<EOF;
usage: $0 [options]
  Required options:
     -h host         host from which the tar archive is created
     -n dumpNum      dump number from which the tar archive is created
                     A negative number means relative to the end (eg -1
                     means the most recent dump, -2 2nd most recent etc).
     -s shareName    share name from which the tar archive is created

  Other options:
     -t              print summary totals
     -r pathRemove   path prefix that will be replaced with pathAdd
     -p pathAdd      new path prefix
     -b BLOCKS       BLOCKS x 512 bytes per record (default 20; same as tar)
     -w writeBufSz   write buffer size (default 1048576 = 1MB)
     -f              overwrite existing parts
     -v              verbose output
     -d              debug output
EOF
    exit(1);
}

if ( $opts{h} !~ /^([\w\.\s-]+)$/ ) {
    die "$0: bad host name '$opts{h}'\n";
}
my $Host = $opts{h};

if ( $opts{n} !~ /^(-?\d+)$/ ) {
    die "$0: bad dump number '$opts{n}'\n";
}
my $Num = $opts{n};

my $bin;
foreach my $c (qw/gzip md5sum tee/) {
    $bin->{$c} = which($c) || die "$0 needs $c, install it\n";
}

my @Backups = $bpc->BackupInfoRead($Host);
my $FileCnt = 0;
my $ByteCnt = 0;
my $DirCnt = 0;
my $SpecialCnt = 0;
my $ErrorCnt = 0;
my $current_tar_size = 0;
my $total_increment_size = 0;

my $i;
$Num = $Backups[@Backups + $Num]{num} if ( -@Backups <= $Num && $Num < 0 );
for ( $i = 0 ; $i < @Backups ; $i++ ) {
    last if ( $Backups[$i]{num} == $Num );
}
if ( $i >= @Backups ) {
    die "$0: bad backup number $Num for host $Host\n";
}

my $PathRemove = $1 if ( $opts{r} =~ /(.+)/ );
my $PathAdd    = $1 if ( $opts{p} =~ /(.+)/ );
if ( $opts{s} !~ /^([\w\s\.\/\$-]+)$/ && $opts{s} ne "*" ) {
    die "$0: bad share name '$opts{s}'\n";
}
our $ShareName = $opts{s};
our $view = BackupPC::View->new($bpc, $Host, \@Backups);

# database

my $dsn = $Conf{SearchDSN};
my $db_user = $Conf{SearchUser} || '';

my $dbh = DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 0 } );

my $sth_inc_size = $dbh->prepare(qq{
    update backups set
        inc_size = ?,
        inc_deleted = false
    where id = ?
});
my $sth_backup_parts = $dbh->prepare(qq{
    insert into backup_parts (
        backup_id,
        part_nr,
        tar_size,
        size,
        md5,
        items
    ) values (?,?,?,?,?,?)
});

#
# This constant and the line of code below that uses it are borrowed
# from Archive::Tar.  Thanks to Calle Dybedahl and Stephen Zander.
# See www.cpan.org.
#
# Archive::Tar is Copyright 1997 Calle Dybedahl. All rights reserved.
# Copyright 1998 Stephen Zander. All rights reserved.
#
my $tar_pack_header
    = 'a100 a8 a8 a8 a12 a12 A8 a1 a100 a6 a2 a32 a32 a8 a8 a155 x12';
my $tar_header_length = 512;
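
#
# For reference, the fields in the pack template above follow the standard
# ustar header layout: name(100) mode(8) uid(8) gid(8) size(12) mtime(12)
# chksum(8) typeflag(1) linkname(100) magic(6) version(2) uname(32)
# gname(32) devmajor(8) devminor(8) prefix(155), NUL-padded (x12) to the
# full 512-byte record.
#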

my $BufSize    = $opts{w} || 1048576;     # 1MB or 2^20
my $WriteBuf   = "";
my $WriteBufSz = ($opts{b} || 20) * $tar_header_length;

my(%UidCache, %GidCache);
my(%HardLinkExtraFiles, @HardLinks);

#
# Write out all the requested files/directories
#

my $max_file_size = $Conf{'MaxArchiveFileSize'} || die "problem with MaxArchiveFileSize parameter";

my $tar_dir = $Conf{InstallDir}.'/'.$Conf{GzipTempDir};
die "problem with $tar_dir, check GzipTempDir in configuration\n" unless (-d $tar_dir && -w $tar_dir);

my $tar_file = BackupPC::SearchLib::getGzipName($Host, $ShareName, $Num) || die "can't getGzipName($Host, $ShareName, $Num)";

my $tar_path_final = $tar_dir . '/' . $tar_file;
my $tar_path = $tar_path_final . '.tmp';

$tar_path =~ s#//#/#g;

my $sth = $dbh->prepare(qq{
    SELECT
        backups.id
    FROM backups
    JOIN shares on shares.id = shareid
    JOIN hosts on hosts.id = shares.hostid
    WHERE hosts.name = ? and shares.name = ? and backups.num = ?
});
$sth->execute($Host, $ShareName, $Num);
my ($backup_id) = $sth->fetchrow_array;
$sth->finish;

# delete existing backup_parts
my $sth_delete_backup_parts = $dbh->prepare(qq{
    delete from backup_parts
    where backup_id = ?
});
$sth_delete_backup_parts->execute($backup_id);

print STDERR "backup_id: $backup_id working dir: $tar_dir, max uncompressed size $max_file_size bytes, tar $tar_file\n" if ($opts{d});

if (-e $tar_path_final) {
    if ($opts{f}) {
        rmtree($tar_path_final) or die "can't remove $tar_path_final: $!";
    } else {
        die "$tar_path_final already exists\n";
    }
}

my $fh;
my $part = 0;
my $no_files = 0;
my $items_in_part = 0;

sub new_tar_part {
    my $arg = {@_};

    if ($fh) {
        return if ($current_tar_size == 0);

        print STDERR "\n\t+ $part:";

        #
        # Finish with two null 512 byte headers,
        # and then round out a full block.
        #
        my $data = "\0" x ($tar_header_length * 2);
        TarWrite($fh, \$data);
        TarWrite($fh, undef);

        close($fh) || die "can't close archive part $part: $!";

        my $file = $tar_path . '/' . $part;

        my $md5 = read_file( $file . '.md5' ) || die "can't read md5sum file ${file}.md5";
        $md5 =~ s/\s.*$//;

        my $size = (stat( $file . '.tar.gz' ))[7] || die "can't stat ${file}.tar.gz";

        print "$file, $size bytes, $items_in_part items";

        $sth_backup_parts->execute(
            $backup_id,
            $part,
            $current_tar_size,
            $size,
            $md5,
            $items_in_part,
        );

        $total_increment_size += $size;

        if ($arg->{close}) {

            sub move($$) {
                my ($from,$to) = @_;
                print STDERR "# rename $from -> $to\n" if ($opts{d});
                rename($from, $to) or die "can't move $from -> $to: $!\n";
            }

            if ($part == 1) {
                print STDERR " single" if ($opts{v});
                move("${tar_path}/1.tar.gz", "${tar_path_final}.tar.gz");
                move("${tar_path}/1.md5", "${tar_path_final}.md5");
                rmtree($tar_path) or die "can't remove temporary dir $tar_path: $!";
            } else {
                print STDERR " [last]" if ($opts{v});
                move("${tar_path}", "${tar_path_final}");

                # remove any leftover single-part archive from a previous run
                foreach my $suffix (qw/.tar.gz .md5/) {
                    my $path = $tar_path_final . $suffix;
                    unlink $path if (-e $path);
                }
            }

            $sth_inc_size->execute(
                $total_increment_size,
                $backup_id
            );

            print "\n\ttotal $total_increment_size bytes";

            return;
        }

    }

    $part++;

    # if this is the first part, create the temporary directory

    if ($part == 1) {
        if (-e $tar_path) {
            print STDERR "# deleting existing $tar_path\n" if ($opts{d});
            rmtree($tar_path);
        }
        mkdir($tar_path) || die "can't create directory $tar_path: $!";

        sub abort_cleanup {
            print STDERR "ABORTED: cleanup temp dir ";
            rmtree($tar_path);
            $dbh->rollback;
            exit 1;
        }

        $SIG{'INT'}  = \&abort_cleanup;
        $SIG{'QUIT'} = \&abort_cleanup;
        $SIG{'__DIE__'} = \&abort_cleanup;

    }

    my $file = $tar_path . '/' . $part;

    #
    # create complex pipe which will pass output through gzip
    # for compression, create file on disk using tee
    # and pipe same output to md5sum to create checksum
    #

    my $cmd = '| ' . $bin->{'gzip'}   . ' ' . $Conf{GzipLevel} . ' ' .
              '| ' . $bin->{'tee'}    . ' ' . $file . '.tar.gz' . ' ' .
              '| ' . $bin->{'md5sum'} . ' - > ' . $file . '.md5';

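    #
    # illustrative expansion (actual binary paths come from which() and the
    # level from $Conf{GzipLevel}):
    #   | /bin/gzip -9 | /usr/bin/tee .../1.tar.gz | /usr/bin/md5sum - > .../1.md5
    #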
    print STDERR "## $cmd\n" if ($opts{d});

    open($fh, $cmd) or die "can't open $cmd: $!";
    binmode($fh);

    $current_tar_size = 0;
    $items_in_part = 0;
}

new_tar_part();

if (seedCache($Host, $ShareName, $Num)) {
    archiveWrite($fh, '/');
    archiveWriteHardLinks($fh);
    new_tar_part( close => 1 );
} else {
    print STDERR "NOTE: no files found for $Host:$ShareName, increment $Num\n" if ($opts{v});
    # remove temporary files if there are no files
    rmtree($tar_path);

    my $sth = $dbh->prepare(qq{
        update backups set inc_size = 0, inc_deleted = true
        where id = ?
    });
    $sth->execute($backup_id);

}

#
# print out totals if requested
#
if ( $opts{t} ) {
    print STDERR "Done: $FileCnt files, $ByteCnt bytes, $DirCnt dirs,",
                 " $SpecialCnt specials, $ErrorCnt errors\n";
}
if ( $ErrorCnt && !$FileCnt && !$DirCnt ) {
    #
    # Got errors, with no files or directories; exit with non-zero
    # status
    #
    die "got errors or no files\n";
}

$sth_inc_size->finish;
$sth_backup_parts->finish;

$dbh->commit || die "can't commit changes to database";
$dbh->disconnect();

exit;

###########################################################################
# Subroutines
###########################################################################

sub archiveWrite
{
    my($fh, $dir, $tarPathOverride) = @_;

    if ( $dir =~ m{(^|/)\.\.(/|$)} ) {
        print(STDERR "$0: bad directory '$dir'\n");
        $ErrorCnt++;
        return;
    }
    $dir = "/" if ( $dir eq "." );
    #print(STDERR "calling find with $Num, $ShareName, $dir\n");

    if ( $view->find($Num, $ShareName, $dir, 0, \&TarWriteFile,
                     $fh, $tarPathOverride) < 0 ) {
        print(STDERR "$0: bad share or directory '$ShareName/$dir'\n");
        $ErrorCnt++;
        return;
    }
}

#
# Write out any accumulated hardlinks
#
sub archiveWriteHardLinks
{
    my ($fh) = @_;
    foreach my $hdr ( @HardLinks ) {
        $hdr->{size} = 0;
        if ( defined($PathRemove)
              && substr($hdr->{linkname}, 0, length($PathRemove)+1)
                 eq ".$PathRemove" ) {
            substr($hdr->{linkname}, 0, length($PathRemove)+1) = ".$PathAdd";
        }
        TarWriteFileInfo($fh, $hdr);
    }
    @HardLinks = ();
    %HardLinkExtraFiles = ();
}

sub UidLookup
{
    my($uid) = @_;

    $UidCache{$uid} = (getpwuid($uid))[0] if ( !exists($UidCache{$uid}) );
    return $UidCache{$uid};
}

sub GidLookup
{
    my($gid) = @_;

    $GidCache{$gid} = (getgrgid($gid))[0] if ( !exists($GidCache{$gid}) );
    return $GidCache{$gid};
}

sub TarWrite
{
    my($fh, $dataRef) = @_;

    if ( !defined($dataRef) ) {
        #
        # do flush by padding to a full $WriteBufSz
        #
        my $data = "\0" x ($WriteBufSz - length($WriteBuf));
        $dataRef = \$data;
    }

    # poor man's tell :-)
    $current_tar_size += length($$dataRef);

    if ( length($WriteBuf) + length($$dataRef) < $WriteBufSz ) {
        #
        # just buffer and return
        #
        $WriteBuf .= $$dataRef;
        return;
    }
    my $done = $WriteBufSz - length($WriteBuf);
    if ( syswrite($fh, $WriteBuf . substr($$dataRef, 0, $done))
                        != $WriteBufSz ) {
        die "Unable to write to output file ($!)\n";
    }
    while ( $done + $WriteBufSz <= length($$dataRef) ) {
        if ( syswrite($fh, substr($$dataRef, $done, $WriteBufSz))
                            != $WriteBufSz ) {
            die "Unable to write to output file ($!)\n";
        }
        $done += $WriteBufSz;
    }
    $WriteBuf = substr($$dataRef, $done);
}
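
#
# Note: TarWrite buffers output into $WriteBufSz-sized chunks so the pipe
# always sees whole tar records; calling TarWrite($fh, undef) flushes the
# remainder by padding it with NULs up to a full $WriteBufSz block.
#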

sub TarWritePad
{
    my($fh, $size) = @_;

    if ( $size % $tar_header_length ) {
        my $data = "\0" x ($tar_header_length - ($size % $tar_header_length));
        TarWrite($fh, \$data);
    }
}

sub TarWriteHeader
{
    my($fh, $hdr) = @_;

    $hdr->{uname} = UidLookup($hdr->{uid}) if ( !defined($hdr->{uname}) );
    $hdr->{gname} = GidLookup($hdr->{gid}) if ( !defined($hdr->{gname}) );
    my $devmajor = defined($hdr->{devmajor}) ? sprintf("%07o", $hdr->{devmajor})
                                             : "";
    my $devminor = defined($hdr->{devminor}) ? sprintf("%07o", $hdr->{devminor})
                                             : "";
    my $sizeStr;
    if ( $hdr->{size} >= 2 * 65536 * 65536 ) {
        #
        # GNU extension for files >= 8GB: send size in big-endian binary
        #
        $sizeStr = pack("c4 N N", 0x80, 0, 0, 0,
                                  $hdr->{size} / (65536 * 65536),
                                  $hdr->{size} % (65536 * 65536));
    } elsif ( $hdr->{size} >= 1 * 65536 * 65536 ) {
        #
        # sprintf octal only handles up to 2^32 - 1
        #
        $sizeStr = sprintf("%03o", $hdr->{size} / (1 << 24))
                 . sprintf("%08o", $hdr->{size} % (1 << 24));
    } else {
        $sizeStr = sprintf("%011o", $hdr->{size});
    }
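    #
    # For example, a 4 GiB file (2**32 bytes) takes the middle branch:
    # sprintf("%03o", 256) . sprintf("%08o", 0) gives "40000000000",
    # the 11-digit octal representation of 2**32.
    #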
    my $data = pack($tar_pack_header,
                     substr($hdr->{name}, 0, 99),
                     sprintf("%07o", $hdr->{mode}),
                     sprintf("%07o", $hdr->{uid}),
                     sprintf("%07o", $hdr->{gid}),
                     $sizeStr,
                     sprintf("%011o", $hdr->{mtime}),
                     "",        #checksum field - space padded by pack("A8")
                     $hdr->{type},
                     substr($hdr->{linkname}, 0, 99),
                     $hdr->{magic} || 'ustar ',
                     $hdr->{version} || ' ',
                     $hdr->{uname},
                     $hdr->{gname},
                     $devmajor,
                     $devminor,
                     ""         # prefix is empty
                 );
    substr($data, 148, 7) = sprintf("%06o\0", unpack("%16C*",$data));
    TarWrite($fh, \$data);
}

sub TarWriteFileInfo
{
    my($fh, $hdr) = @_;

    #
    # Handle long link names (symbolic links)
    #
    if ( length($hdr->{linkname}) > 99 ) {
        my %h;
        my $data = $hdr->{linkname} . "\0";
        $h{name} = "././\@LongLink";
        $h{type} = "K";
        $h{size} = length($data);
        TarWriteHeader($fh, \%h);
        TarWrite($fh, \$data);
        TarWritePad($fh, length($data));
    }
    #
    # Handle long file names
    #
    if ( length($hdr->{name}) > 99 ) {
        my %h;
        my $data = $hdr->{name} . "\0";
        $h{name} = "././\@LongLink";
        $h{type} = "L";
        $h{size} = length($data);
        TarWriteHeader($fh, \%h);
        TarWrite($fh, \$data);
        TarWritePad($fh, length($data));
    }
    TarWriteHeader($fh, $hdr);
}

#
# seed cache of files in this increment
#
sub seedCache($$$) {
    my ($host, $share, $dumpNo) = @_;

    print STDERR curr_time(), " $host:$share #$dumpNo" if ($opts{v});
    my $sql = q{
        SELECT path,size
        FROM files
        JOIN shares on shares.id = shareid
        JOIN hosts on hosts.id = shares.hostid
        WHERE hosts.name = ? and shares.name = ? and backupnum = ?
    };

    my $sth = $dbh->prepare($sql);
    $sth->execute($host, $share, $dumpNo);
    my $count = $sth->rows;
    print STDERR " $count items, parts:" if ($opts{v});
    while (my $row = $sth->fetchrow_arrayref) {
        #print STDERR "+ ", $row->[0],"\n";
        $in_backup_increment->{ $row->[0] } = $row->[1];
    }

    $sth->finish();

    return $count;
}

#
# calculate overhead for one file in tar
#
sub tar_overhead($) {
    my $name = shift || '';

    # header, padding of file and two null blocks at end
    my $len = 4 * $tar_header_length;

    # if filename is longer than 99 chars add blocks for
    # the long filename
    if ( length($name) > 99 ) {
        $len += int( ( length($name) + $tar_header_length ) / $tar_header_length ) * $tar_header_length;
    }

    return $len;
}
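
#
# For example, a 150-character name adds int((150 + 512) / 512) * 512 = 512
# bytes on top of the base 4 * 512 = 2048, for 2560 bytes of overhead in total.
#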

my $Attr;
my $AttrDir;

sub TarWriteFile
{
    my($hdr, $fh, $tarPathOverride) = @_;

    my $tarPath = $hdr->{relPath};
    $tarPath = $tarPathOverride if ( defined($tarPathOverride) );

    $tarPath =~ s{//+}{/}g;

    #print STDERR "? $tarPath\n" if ($opts{d});
    my $size = $in_backup_increment->{$tarPath};
    return unless (defined($size));

    # is this file too large to fit into MaxArchiveFileSize?

    if ( ($current_tar_size + tar_overhead($tarPath) + $size) > $max_file_size ) {
        print STDERR "# tar file $current_tar_size + " . tar_overhead($tarPath) . " + $size > $max_file_size, splitting\n" if ($opts{d});
        new_tar_part();
    }

    #print STDERR "A $tarPath [$size] tell: $current_tar_size\n" if ($opts{d});
    $items_in_part++;

    if ( defined($PathRemove)
            && substr($tarPath, 0, length($PathRemove)) eq $PathRemove ) {
        substr($tarPath, 0, length($PathRemove)) = $PathAdd;
    }
    $tarPath = "./" . $tarPath if ( $tarPath !~ /^\.\// );
    $tarPath =~ s{//+}{/}g;
    $hdr->{name} = $tarPath;

    if ( $hdr->{type} == BPC_FTYPE_DIR ) {
        #
        # Directory: just write the header
        #
        $hdr->{name} .= "/" if ( $hdr->{name} !~ m{/$} );
        TarWriteFileInfo($fh, $hdr);
        $DirCnt++;
    } elsif ( $hdr->{type} == BPC_FTYPE_FILE ) {
        #
        # Regular file: write the header and file
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        if ( !defined($f) ) {
            print(STDERR "Unable to open file $hdr->{fullPath}\n");
            $ErrorCnt++;
            return;
        }
        # do we need to split the file?
        if ($hdr->{size} < $max_file_size) {
            TarWriteFileInfo($fh, $hdr);
            my($data, $size);
            while ( $f->read(\$data, $BufSize) > 0 ) {
                TarWrite($fh, \$data);
                $size += length($data);
            }
            $f->close;
            TarWritePad($fh, $size);
            $FileCnt++;
            $ByteCnt += $size;
        } else {
            my $full_size = $hdr->{size};
            my $orig_name = $hdr->{name};
            my $max_part_size = $max_file_size - tar_overhead($hdr->{name});

            my $parts = int(($full_size + $max_part_size - 1) / $max_part_size);
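            # (ceiling division: the smallest number of $max_part_size
            # chunks that covers $full_size)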
            print STDERR "# splitting $orig_name [$full_size bytes] into $parts parts\n" if ($opts{d});
            foreach my $subpart ( 1 .. $parts ) {
                new_tar_part();
                if ($subpart < $parts) {
                    $hdr->{size} = $max_part_size;
                } else {
                    # last part gets whatever is left over
                    $hdr->{size} = $full_size - $max_part_size * ($parts - 1);
                }
                $hdr->{name} = $orig_name . '/' . $subpart;
                print STDERR "## creating part $subpart ",$hdr->{name}, " [", $hdr->{size}," bytes]\n";

                TarWriteFileInfo($fh, $hdr);
                my($data, $size);
                for ( 1 .. int($hdr->{size} / $BufSize) ) {
                    my $r_size = $f->read(\$data, $BufSize);
                    die "expected $BufSize bytes read, got $r_size bytes!" if ($r_size != $BufSize);
                    TarWrite($fh, \$data);
                    $size += length($data);
                }
                my $size_left = $hdr->{size} % $BufSize;
                my $r_size = $f->read(\$data, $size_left);
                die "expected $size_left bytes last read, got $r_size bytes!" if ($r_size != $size_left);

                TarWrite($fh, \$data);
                $size += length($data);
                TarWritePad($fh, $size);

                $items_in_part++;
            }
            $f->close;
            $FileCnt++;
            $ByteCnt += $full_size;
            new_tar_part();
        }
    } elsif ( $hdr->{type} == BPC_FTYPE_HARDLINK ) {
        #
        # Hardlink file: either write a hardlink or the complete file
        # depending upon whether the linked-to file will be written
        # to the archive.
        #
        # Start by reading the contents of the link.
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        if ( !defined($f) ) {
            print(STDERR "Unable to open file $hdr->{fullPath}\n");
            $ErrorCnt++;
            return;
        }
        my $data;
        while ( $f->read(\$data, $BufSize) > 0 ) {
            $hdr->{linkname} .= $data;
        }
        $f->close;
        my $done = 0;
        my $name = $hdr->{linkname};
        $name =~ s{^\./}{/};
        if ( $HardLinkExtraFiles{$name} ) {
            #
            # Target file will be or was written, so just remember
            # the hardlink so we can dump it later.
            #
            push(@HardLinks, $hdr);
            $SpecialCnt++;
        } else {
            #
            # Have to dump the original file.  Just call the top-level
            # routine, so that we save the hassle of dealing with
            # mangling, merging and attributes.  Remember it under the
            # same normalized name used for the lookup above.
            #
            $HardLinkExtraFiles{$name} = 1;
            archiveWrite($fh, $hdr->{linkname}, $hdr->{name});
        }
    } elsif ( $hdr->{type} == BPC_FTYPE_SYMLINK ) {
        #
        # Symbolic link: read the symbolic link contents into the header
        # and write the header.
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        if ( !defined($f) ) {
            print(STDERR "Unable to open symlink file $hdr->{fullPath}\n");
            $ErrorCnt++;
            return;
        }
        my $data;
        while ( $f->read(\$data, $BufSize) > 0 ) {
            $hdr->{linkname} .= $data;
        }
        $f->close;
        $hdr->{size} = 0;
        TarWriteFileInfo($fh, $hdr);
        $SpecialCnt++;
    } elsif ( $hdr->{type} == BPC_FTYPE_CHARDEV
           || $hdr->{type} == BPC_FTYPE_BLOCKDEV
           || $hdr->{type} == BPC_FTYPE_FIFO ) {
        #
        # Special files: for char and block special we read the
        # major and minor numbers from a plain file.
        #
        if ( $hdr->{type} != BPC_FTYPE_FIFO ) {
            my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0,
                                            $hdr->{compress});
            my $data;
            if ( !defined($f) || $f->read(\$data, $BufSize) < 0 ) {
                print(STDERR "Unable to open/read char/block special file"
                           . " $hdr->{fullPath}\n");
                $f->close if ( defined($f) );
                $ErrorCnt++;
                return;
            }
            $f->close;
            if ( $data =~ /(\d+),(\d+)/ ) {
                $hdr->{devmajor} = $1;
                $hdr->{devminor} = $2;
            }
        }
        $hdr->{size} = 0;
        TarWriteFileInfo($fh, $hdr);
        $SpecialCnt++;
    } else {
        print(STDERR "Got unknown type $hdr->{type} for $hdr->{name}\n");
        $ErrorCnt++;
    }
}

my $t_fmt = '%Y-%m-%d %H:%M:%S';
sub curr_time {
    return strftime($t_fmt, localtime());
}

Properties

Name            Value
svn:executable  *
