/[BackupPC]/trunk/bin/BackupPC_tarIncCreate
This is a repository of my old source code, which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /trunk/bin/BackupPC_tarIncCreate

Parent Directory Parent Directory | Revision Log Revision Log


Revision 235 - (show annotations)
Thu Nov 10 15:07:10 2005 UTC (18 years, 6 months ago) by dpavlin
File size: 22363 byte(s)
 r8747@llin:  dpavlin | 2005-11-10 16:06:57 +0100
 added backup_parts table to track parts of each backup, rewrote
 BackupPC_tarIncCreate to use backup_parts

1 #!/usr/bin/perl -w
2 #============================================================= -*-perl-*-
3 #
4 # BackupPC_tarIncCreate: create a tar archive of an existing incremental dump
5 #
6 #
7 # DESCRIPTION
8 #
9 # Usage: BackupPC_tarIncCreate [options]
10 #
11 # Flags:
12 # Required options:
13 #
14 # -h host Host from which the tar archive is created.
15 # -n dumpNum Dump number from which the tar archive is created.
16 # A negative number means relative to the end (eg -1
17 # means the most recent dump, -2 2nd most recent etc).
18 # -s shareName Share name from which the tar archive is created.
19 #
20 # Other options:
21 # -t print summary totals
22 # -r pathRemove path prefix that will be replaced with pathAdd
23 # -p pathAdd new path prefix
24 # -b BLOCKS BLOCKS x 512 bytes per record (default 20; same as tar)
25 # -w writeBufSz write buffer size (default 1MB)
26 #
27 # The -h, -n and -s options specify which dump is used to generate
28 # the tar archive. The -r and -p options can be used to relocate
29 # the paths in the tar archive so extracted files can be placed
30 # in a location different from their original location.
31 #
32 # AUTHOR
33 # Craig Barratt <cbarratt@users.sourceforge.net>
34 # Ivan Klaric <iklaric@gmail.com>
35 # Dobrica Pavlinusic <dpavlin@rot13.org>
36 #
37 # COPYRIGHT
38 # Copyright (C) 2001-2003 Craig Barratt
39 #
40 # This program is free software; you can redistribute it and/or modify
41 # it under the terms of the GNU General Public License as published by
42 # the Free Software Foundation; either version 2 of the License, or
43 # (at your option) any later version.
44 #
45 # This program is distributed in the hope that it will be useful,
46 # but WITHOUT ANY WARRANTY; without even the implied warranty of
47 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
48 # GNU General Public License for more details.
49 #
50 # You should have received a copy of the GNU General Public License
51 # along with this program; if not, write to the Free Software
52 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
53 #
54 #========================================================================
55 #
56 # Version 2.1.0, released 20 Jun 2004.
57 #
58 # See http://backuppc.sourceforge.net.
59 #
60 #========================================================================
61
62 use strict;
63 no utf8;
64 use lib "__INSTALLDIR__/lib";
65 use File::Path;
66 use Getopt::Std;
67 use DBI;
68 use BackupPC::Lib;
69 use BackupPC::Attrib qw(:all);
70 use BackupPC::FileZIO;
71 use BackupPC::View;
72 use BackupPC::SearchLib;
73 use Time::HiRes qw/time/;
74 use POSIX qw/strftime/;
75 use File::Which;
76 use File::Path;
77 use File::Slurp;
78 use Data::Dumper; ### FIXME
79
#
# Initialize the BackupPC library and load server configuration.
#
die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
my $TopDir = $bpc->TopDir();
my $BinDir = $bpc->BinDir();
my %Conf = $bpc->Conf();
# SearchLib reads its configuration from this package variable
%BackupPC::SearchLib::Conf = %Conf;
my %opts;
# hashref: path within this increment -> file size; filled by seedCache()
my $in_backup_increment;


# Parse command-line flags; on bad invocation print usage and exit non-zero.
if ( !getopts("th:n:p:r:s:b:w:vd", \%opts) ) {
    print STDERR <<EOF;
usage: $0 [options]
Required options:
-h host host from which the tar archive is created
-n dumpNum dump number from which the tar archive is created
A negative number means relative to the end (eg -1
means the most recent dump, -2 2nd most recent etc).
-s shareName share name from which the tar archive is created

Other options:
-t print summary totals
-r pathRemove path prefix that will be replaced with pathAdd
-p pathAdd new path prefix
-b BLOCKS BLOCKS x 512 bytes per record (default 20; same as tar)
-w writeBufSz write buffer size (default 1048576 = 1MB)
-v verbose output
-d debug output
EOF
    exit(1);
}
110
#
# Validate the required -h and -n options.  Checking defined() first
# avoids "Use of uninitialized value" warnings under -w when the flag
# is omitted entirely; the error message and exit status are unchanged.
#
my $host_opt = defined $opts{h} ? $opts{h} : '';
if ( $host_opt !~ /^([\w\.\s-]+)$/ ) {
    print(STDERR "$0: bad host name '$host_opt'\n");
    exit(1);
}
my $Host = $host_opt;

my $num_opt = defined $opts{n} ? $opts{n} : '';
if ( $num_opt !~ /^(-?\d+)$/ ) {
    print(STDERR "$0: bad dump number '$num_opt'\n");
    exit(1);
}
my $Num = $num_opt;

#
# Locate the external programs used by the gzip/tee/md5sum output
# pipeline built in new_tar_part(); fail early if any is missing.
#
my $bin;
foreach my $c (qw/gzip md5sum tee/) {
    $bin->{$c} = which($c) || die "$0 needs $c, install it\n";
}
127
my @Backups = $bpc->BackupInfoRead($Host);
my $FileCnt = 0;
my $ByteCnt = 0;
my $DirCnt = 0;
my $SpecialCnt = 0;
my $ErrorCnt = 0;
my $current_tar_size = 0;

#
# Resolve a negative dump number (-1 = most recent) to an absolute
# backup number, then locate its index in @Backups.
#
my $i;
$Num = $Backups[@Backups + $Num]{num} if ( -@Backups <= $Num && $Num < 0 );
for ( $i = 0 ; $i < @Backups ; $i++ ) {
    last if ( $Backups[$i]{num} == $Num );
}
if ( $i >= @Backups ) {
    print(STDERR "$0: bad backup number $Num for host $Host\n");
    exit(1);
}

#
# -r/-p path relocation prefixes.  NOTE: the original used
# "my $x = $1 if (...)" which perlsyn documents as undefined behaviour
# when the condition is false; declare first, assign separately.
# defined() guards avoid warnings when -r/-p are omitted.
#
my $PathRemove;
$PathRemove = $1 if ( defined($opts{r}) && $opts{r} =~ /(.+)/ );
my $PathAdd;
$PathAdd = $1 if ( defined($opts{p}) && $opts{p} =~ /(.+)/ );

my $share_opt = defined $opts{s} ? $opts{s} : '';
if ( $share_opt !~ /^([\w\s\.\/\$-]+)$/ && $share_opt ne "*" ) {
    print(STDERR "$0: bad share name '$share_opt'\n");
    exit(1);
}
our $ShareName = $share_opt;
our $view = BackupPC::View->new($bpc, $Host, \@Backups);
154
# database

#
# Connect to the BackupPC search-extension database.  AutoCommit is
# off: backup_parts rows inserted below are only persisted by the
# explicit commit at the end of the main flow.
#
my $dsn = $Conf{SearchDSN};
my $db_user = $Conf{SearchUser} || '';

my $dbh = DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 0} );

# Records final sizes/part count for a finished backup.
# NOTE(review): this statement is prepared and finished but never
# executed anywhere in this file — confirm whether the inc_size
# update was meant to happen before commit.
my $sth_inc_size = $dbh->prepare(qq{
    update backups set
        inc_size = ?,
        parts = ?,
        inc_deleted = false
    where id = ? });
# One row per on-disk tar part, inserted by new_tar_part() when a
# part is closed.
my $sth_backup_parts = $dbh->prepare(qq{
    insert into backup_parts (
        backup_id,
        part_nr,
        tar_size,
        size,
        md5,
        items
    ) values (?,?,?,?,?,?)
});

#
# This constant and the line of code below that uses it are borrowed
# from Archive::Tar. Thanks to Calle Dybedahl and Stephen Zander.
# See www.cpan.org.
#
# Archive::Tar is Copyright 1997 Calle Dybedahl. All rights reserved.
# Copyright 1998 Stephen Zander. All rights reserved.
#
my $tar_pack_header
    = 'a100 a8 a8 a8 a12 a12 A8 a1 a100 a6 a2 a32 a32 a8 a8 a155 x12';
my $tar_header_length = 512;

my $BufSize = $opts{w} || 1048576;              # 1MB or 2^20
my $WriteBuf = "";                              # pending output; flushed in $WriteBufSz chunks by TarWrite()
my $WriteBufSz = ($opts{b} || 20) * $tar_header_length;

my(%UidCache, %GidCache);                       # uid/gid -> name lookup caches
my(%HardLinkExtraFiles, @HardLinks);            # hardlink bookkeeping (see TarWriteFile)

#
# Write out all the requested files/directories
#

# NOTE(review): multiplied by 1024 below, so MaxArchiveFileSize is
# presumably configured in KB — confirm against the config docs.
my $max_file_size = $Conf{'MaxArchiveFileSize'} || die "problem with MaxArchiveFileSize parametar";
$max_file_size *= 1024;

# working directory where the gzipped tar parts are assembled
my $tar_dir = $Conf{InstallDir}.'/'.$Conf{GzipTempDir};
die "problem with $tar_dir, check GzipTempDir in configuration\n" unless (-d $tar_dir && -w $tar_dir);

my $tar_file = BackupPC::SearchLib::getGzipName($Host, $ShareName, $Num) || die "can't getGzipName($Host, $ShareName, $Num)";

# .tmp suffix marks the archive directory as in-progress; collapse //
my $tar_path = $tar_dir . '/' . $tar_file . '.tmp';
$tar_path =~ s#//#/#g;

# look up the database id of the backup we are archiving
my $sth = $dbh->prepare(qq{
    SELECT
        backups.id
    FROM backups
    JOIN shares on shares.id = shareid
    JOIN hosts on hosts.id = shares.hostid
    WHERE hosts.name = ? and shares.name = ? and backups.num = ?
});
$sth->execute($Host, $ShareName, $Num);
my ($backup_id) = $sth->fetchrow_array;
$sth->finish;

print STDERR "backup_id: $backup_id working dir: $tar_dir, max uncompressed size $max_file_size bytes, tar $tar_file\n" if ($opts{d});


# State shared between new_tar_part() and TarWriteFile():
my $fh;                 # pipe handle to gzip|tee|md5sum for the current part
my $part = 0;           # current part number (1-based once opened)
my $no_files = 0;       # set when the increment contains no files at all
my $items_in_part = 0;  # number of items written into the current part
232
#
# Rotate tar parts: close the current part (if one is open and has
# data), record its size/md5/item count in backup_parts, then open a
# fresh gzip|tee|md5sum pipeline for the next part.
#
sub new_tar_part {
    if ($fh) {
        # nothing written since last rotation; keep the current part open
        return if ($current_tar_size == 0);

        print STDERR "# closing part $part\n" if ($opts{d});

        # finish tar archive
        my $data = "\0" x ($tar_header_length * 2);
        TarWrite($fh, \$data);
        TarWrite($fh, undef);   # flush write buffer, padding to $WriteBufSz

        close($fh) || die "can't close archive part $part: $!";

        my $file = $tar_path . '/' . $part;

        # md5sum output is "<hash>  <name>"; strip everything after the hash
        my $md5 = read_file( $file . '.md5' ) || die "can't read md5sum file ${file}.md5";
        $md5 =~ s/\s.*$//;

        # compressed on-disk size of the part
        my $size = (stat( $file . '.tar.gz' ))[7] || die "can't stat ${file}.tar.gz";

        $sth_backup_parts->execute(
            $backup_id,
            $part,
            $current_tar_size,
            $size,
            $md5,
            $items_in_part,
        );

    }

    $part++;

    # if this is first part, create directory

    if ($part == 1) {
        if (-d $tar_path) {
            print STDERR "# deleting existing $tar_path\n" if ($opts{d});
            rmtree($tar_path);
        }
        mkdir($tar_path) || die "can't create directory $tar_path: $!";
    }

    my $file = $tar_path . '/' . $part;

    #
    # create complex pipe which will pass output through gzip
    # for compression, create file on disk using tee
    # and pipe same output to md5sum to create checksum
    #
    # NOTE(review): this is a 2-arg pipe open with $file and
    # $Conf{GzipLevel} interpolated into a shell command; the path is
    # built from config/host/share names — confirm these can never
    # contain shell metacharacters.
    #

    my $cmd = '| ' . $bin->{'gzip'} . ' ' . $Conf{GzipLevel} . ' ' .
              '| ' . $bin->{'tee'} . ' ' . $file . '.tar.gz' . ' ' .
              '| ' . $bin->{'md5sum'} . ' - > ' . $file . '.md5';

    print STDERR "## $cmd\n" if ($opts{d});

    open($fh, $cmd) or die "can't open $cmd: $!";
    binmode($fh);

    $current_tar_size = 0;
    $items_in_part = 0;
}
296
# open the first part before walking the increment
new_tar_part();

# seedCache() returns the number of files in this increment
if (seedCache($Host, $ShareName, $Num)) {
    archiveWrite($fh, '/');
    archiveWriteHardLinks($fh);
} else {
    print STDERR "NOTE: no files found for $Host:$ShareName, increment $Num\n" if ($opts{v});
    $no_files = 1;
}

#
# Finish with two null 512 byte headers, and then round out a full
# block.
#
# NOTE(review): the final part is closed here without inserting a
# backup_parts row (unlike the rotation in new_tar_part) — confirm
# whether that is intentional or related to the FIXME below.
#
my $data = "\0" x ($tar_header_length * 2);
TarWrite($fh, \$data);
TarWrite($fh, undef);

if (! close($fh)) {
    rmtree($tar_path);
    die "can't close archive\n";
}

# remove temporary files if there are no files
if ($no_files) {
    rmtree($tar_path);
} elsif ($part == 1) {
    warn "FIXME: if there is only one part move to parent directory and rename";
}

#
# print out totals if requested
#
if ( $opts{t} ) {
    print STDERR "Done: $FileCnt files, $ByteCnt bytes, $DirCnt dirs,",
                 " $SpecialCnt specials, $ErrorCnt errors\n";
}
if ( $ErrorCnt && !$FileCnt && !$DirCnt ) {
    #
    # Got errors, with no files or directories; exit with non-zero
    # status
    #
    # NOTE(review): cleanup() is not defined anywhere in this file,
    # so this error path would die with "Undefined subroutine" —
    # confirm where cleanup() is expected to come from.
    #
    cleanup();
    exit(1);
}

$sth_inc_size->finish;
$sth_backup_parts->finish;

# AutoCommit is off: this persists the backup_parts inserts
$dbh->commit || die "can't commit changes to database";
$dbh->disconnect();

exit(0);
350
351 ###########################################################################
352 # Subroutines
353 ###########################################################################
354
#
# Walk one directory of the dump via BackupPC::View::find and emit
# each entry through the TarWriteFile callback.  Rejects paths that
# try to escape via "..".  Increments $ErrorCnt on failure.
#
sub archiveWrite
{
    my ($fh, $dir, $tarPathOverride) = @_;

    # refuse any path containing a ".." component
    if ( $dir =~ m{(^|/)\.\.(/|$)} ) {
        print(STDERR "$0: bad directory '$dir'\n");
        $ErrorCnt++;
        return;
    }
    $dir = "/" if $dir eq ".";

    my $rc = $view->find($Num, $ShareName, $dir, 0, \&TarWriteFile,
                         $fh, $tarPathOverride);
    if ( $rc < 0 ) {
        print(STDERR "$0: bad share or directory '$ShareName/$dir'\n");
        $ErrorCnt++;
        return;
    }
}
374
#
# Write out any hardlinks (if any)
#
# Emits a zero-size tar entry for every deferred hardlink collected by
# TarWriteFile, applying the -r/-p path relocation to the link target,
# then resets the hardlink bookkeeping.
#
sub archiveWriteHardLinks
{
    # BUGFIX: was "my $fh = @_;", which evaluates @_ in scalar context
    # and assigns the argument COUNT (1) instead of the filehandle, so
    # every hardlink header was written to "1" rather than the pipe.
    my ($fh) = @_;
    foreach my $hdr ( @HardLinks ) {
        $hdr->{size} = 0;
        if ( defined($PathRemove)
              && substr($hdr->{linkname}, 0, length($PathRemove)+1)
                        eq ".$PathRemove" ) {
            substr($hdr->{linkname}, 0, length($PathRemove)+1) = ".$PathAdd";
        }
        TarWriteFileInfo($fh, $hdr);
    }
    @HardLinks = ();
    %HardLinkExtraFiles = ();
}
393
#
# Map a numeric uid to its user name, memoizing getpwuid() results
# in %UidCache.  Returns undef (cached) for unknown uids.
#
sub UidLookup
{
    my ($uid) = @_;

    unless ( exists $UidCache{$uid} ) {
        $UidCache{$uid} = (getpwuid($uid))[0];
    }
    return $UidCache{$uid};
}
401
#
# Map a numeric gid to its group name, memoizing getgrgid() results
# in %GidCache.  Returns undef (cached) for unknown gids.
#
sub GidLookup
{
    my ($gid) = @_;

    unless ( exists $GidCache{$gid} ) {
        $GidCache{$gid} = (getgrgid($gid))[0];
    }
    return $GidCache{$gid};
}
409
#
# Buffered writer for the output pipe.  Appends $$dataRef to a
# process-wide buffer and emits it in exact $WriteBufSz-sized
# syswrite() calls.  Calling with an undef data ref flushes, padding
# the buffer to a full $WriteBufSz with NULs (tar blocking factor).
# Also accounts every byte into $current_tar_size for part splitting.
#
sub TarWrite
{
    my($fh, $dataRef) = @_;


    if ( !defined($dataRef) ) {
        #
        # do flush by padding to a full $WriteBufSz
        #
        my $data = "\0" x ($WriteBufSz - length($WriteBuf));
        $dataRef = \$data;
    }

    # poor man's tell :-)  track uncompressed bytes written so far
    $current_tar_size += length($$dataRef);

    if ( length($WriteBuf) + length($$dataRef) < $WriteBufSz ) {
        #
        # just buffer and return
        #
        $WriteBuf .= $$dataRef;
        return;
    }
    # emit the buffered bytes plus just enough new data to make one
    # full $WriteBufSz write
    my $done = $WriteBufSz - length($WriteBuf);
    if ( syswrite($fh, $WriteBuf . substr($$dataRef, 0, $done))
                        != $WriteBufSz ) {
        print(STDERR "Unable to write to output file ($!)\n");
        exit(1);
    }
    # then any remaining whole buffers straight from the new data
    while ( $done + $WriteBufSz <= length($$dataRef) ) {
        if ( syswrite($fh, substr($$dataRef, $done, $WriteBufSz))
                            != $WriteBufSz ) {
            print(STDERR "Unable to write to output file ($!)\n");
            exit(1);
        }
        $done += $WriteBufSz;
    }
    # keep the tail buffered for the next call
    $WriteBuf = substr($$dataRef, $done);
}
449
#
# Pad the archive with NUL bytes so that a $size-byte payload ends on
# a 512-byte tar block boundary.  No-op when already aligned.
#
sub TarWritePad
{
    my ($fh, $size) = @_;

    my $remainder = $size % $tar_header_length;
    return unless $remainder;

    my $pad = "\0" x ($tar_header_length - $remainder);
    TarWrite($fh, \$pad);
}
459
#
# Pack one 512-byte ustar header for $hdr and emit it via TarWrite().
# Fills uname/gname from the uid/gid caches when absent, and encodes
# the size field with the GNU base-256 extension for files >= 8GB.
#
sub TarWriteHeader
{
    my($fh, $hdr) = @_;

    $hdr->{uname} = UidLookup($hdr->{uid}) if ( !defined($hdr->{uname}) );
    $hdr->{gname} = GidLookup($hdr->{gid}) if ( !defined($hdr->{gname}) );
    my $devmajor = defined($hdr->{devmajor}) ? sprintf("%07o", $hdr->{devmajor})
                                             : "";
    my $devminor = defined($hdr->{devminor}) ? sprintf("%07o", $hdr->{devminor})
                                             : "";
    my $sizeStr;
    if ( $hdr->{size} >= 2 * 65536 * 65536 ) {
        #
        # GNU extension for files >= 8GB: send size in big-endian binary
        #
        $sizeStr = pack("c4 N N", 0x80, 0, 0, 0,
                                  $hdr->{size} / (65536 * 65536),
                                  $hdr->{size} % (65536 * 65536));
    } elsif ( $hdr->{size} >= 1 * 65536 * 65536 ) {
        #
        # sprintf octal only handles up to 2^32 - 1
        #
        $sizeStr = sprintf("%03o", $hdr->{size} / (1 << 24))
                 . sprintf("%08o", $hdr->{size} % (1 << 24));
    } else {
        $sizeStr = sprintf("%011o", $hdr->{size});
    }
    my $data = pack($tar_pack_header,
                     substr($hdr->{name}, 0, 99),
                     sprintf("%07o", $hdr->{mode}),
                     sprintf("%07o", $hdr->{uid}),
                     sprintf("%07o", $hdr->{gid}),
                     $sizeStr,
                     sprintf("%011o", $hdr->{mtime}),
                     "",        #checksum field - space padded by pack("A8")
                     $hdr->{type},
                     substr($hdr->{linkname}, 0, 99),
                     $hdr->{magic} || 'ustar ',
                     $hdr->{version} || ' ',
                     $hdr->{uname},
                     $hdr->{gname},
                     $devmajor,
                     $devminor,
                     ""         # prefix is empty
                );
    # checksum: unsigned byte sum of the header (checksum field counted
    # as spaces), stored as six octal digits, NUL, space
    substr($data, 148, 7) = sprintf("%06o\0", unpack("%16C*",$data));
    TarWrite($fh, \$data);
}
508
#
# Write the tar header(s) for one entry.  Names and link targets that
# exceed the 100-byte ustar field get a GNU "././\@LongLink"
# pseudo-entry first: type "K" for the link target, then type "L" for
# the file name, followed by the real header.
#
sub TarWriteFileInfo
{
    my ($fh, $hdr) = @_;

    # long link target first, then long name — same order as GNU tar
    foreach my $long ( [ linkname => 'K' ], [ name => 'L' ] ) {
        my ($field, $type) = @$long;
        next unless length($hdr->{$field}) > 99;

        my $data = $hdr->{$field} . "\0";
        my %h = (
            name => "././\@LongLink",
            type => $type,
            size => length($data),
        );
        TarWriteHeader($fh, \%h);
        TarWrite($fh, \$data);
        TarWritePad($fh, length($data));
    }
    TarWriteHeader($fh, $hdr);
}
541
#
# seed cache of files in this increment
#
# Loads path -> size for every file recorded in the database for this
# host/share/dump number into $in_backup_increment, and returns the
# number of rows found (0 when the increment is empty).
#
# (The prototype is retained for interface compatibility; it has no
# effect here since the sub is called before it is compiled.)
#
sub seedCache($$$) {
    my ($host, $share, $dumpNo) = @_;

    print STDERR curr_time(), "getting files for $host:$share increment $dumpNo..." if ($opts{v});
    my $sql = q{
        SELECT path,size
        FROM files
        JOIN shares on shares.id = shareid
        JOIN hosts on hosts.id = shares.hostid
        WHERE hosts.name = ? and shares.name = ? and backupnum = ?
    };

    my $sth = $dbh->prepare($sql);
    $sth->execute($host, $share, $dumpNo);

    # BUGFIX: DBI documents $sth->rows as unreliable for SELECT
    # statements (many drivers return -1 until all rows are fetched),
    # so count rows while fetching instead of trusting rows().
    my $count = 0;
    while (my $row = $sth->fetchrow_arrayref) {
        #print STDERR "+ ", $row->[0],"\n";
        $in_backup_increment->{ $row->[0] } = $row->[1];
        $count++;
    }
    print STDERR " found $count items\n" if ($opts{v});

    $sth->finish();

    return $count;
}
570
#
# Calculate the tar-format overhead, in bytes, that one file adds to
# an archive: its header, payload padding and the two trailing null
# blocks, plus the extra GNU @LongLink blocks when the name exceeds
# 99 characters.
#
sub tar_overhead($) {
    my $name = shift || '';

    # one header + padding + two null blocks at end of archive
    my $overhead = 4 * $tar_header_length;

    # names longer than 99 chars need a @LongLink header plus the
    # name itself rounded up to whole 512-byte blocks
    if ( length($name) > 99 ) {
        my $long_blocks =
            int( ( length($name) + $tar_header_length ) / $tar_header_length );
        $overhead += $long_blocks * $tar_header_length;
    }

    return $overhead;
}
588
# NOTE(review): $Attr and $AttrDir appear unused anywhere in this
# file — possibly leftovers from BackupPC_tarCreate; confirm before
# removing.
my $Attr;
my $AttrDir;
591
#
# BackupPC::View::find callback: write one filesystem entry into the
# current tar part.  Skips entries not present in this increment
# (per $in_backup_increment), rotates parts when adding the entry
# would exceed MaxArchiveFileSize, and handles directories, regular
# files (including splitting oversized files into name/1, name/2, ...
# sub-entries), hardlinks, symlinks and device/fifo special files.
#
sub TarWriteFile
{
    my($hdr, $fh, $tarPathOverride) = @_;

    my $tarPath = $hdr->{relPath};
    $tarPath = $tarPathOverride if ( defined($tarPathOverride) );

    $tarPath =~ s{//+}{/}g;

    #print STDERR "? $tarPath\n" if ($opts{d});
    # only emit entries recorded in the database for this increment
    my $size = $in_backup_increment->{$tarPath};
    return unless (defined($size));

    # is this file too large to fit into MaxArchiveFileSize?

    if ( ($current_tar_size + tar_overhead($tarPath) + $size) > $max_file_size ) {
        print STDERR "# tar file $current_tar_size + $tar_header_length + $size > $max_file_size, splitting\n" if ($opts{d});
        new_tar_part();
    }

    #print STDERR "A $tarPath [$size] tell: $current_tar_size\n" if ($opts{d});
    $items_in_part++;

    # apply -r/-p path relocation, then normalise to a ./-relative path
    if ( defined($PathRemove)
            && substr($tarPath, 0, length($PathRemove)) eq $PathRemove ) {
        substr($tarPath, 0, length($PathRemove)) = $PathAdd;
    }
    $tarPath = "./" . $tarPath if ( $tarPath !~ /^\.\// );
    $tarPath =~ s{//+}{/}g;
    $hdr->{name} = $tarPath;

    if ( $hdr->{type} == BPC_FTYPE_DIR ) {
        #
        # Directory: just write the header
        #
        $hdr->{name} .= "/" if ( $hdr->{name} !~ m{/$} );
        TarWriteFileInfo($fh, $hdr);
        $DirCnt++;
    } elsif ( $hdr->{type} == BPC_FTYPE_FILE ) {
        #
        # Regular file: write the header and file
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        if ( !defined($f) ) {
            print(STDERR "Unable to open file $hdr->{fullPath}\n");
            $ErrorCnt++;
            return;
        }
        # do we need to split file?
        if ($hdr->{size} < $max_file_size) {
            # fits in one part: header, data, then block padding
            TarWriteFileInfo($fh, $hdr);
            my($data, $size);
            while ( $f->read(\$data, $BufSize) > 0 ) {
                TarWrite($fh, \$data);
                $size += length($data);
            }
            $f->close;
            TarWritePad($fh, $size);
            $FileCnt++;
            $ByteCnt += $size;
        } else {
            # oversized file: emit as name/1, name/2, ... with each
            # sub-part opening a fresh tar part
            my $full_size = $hdr->{size};
            my $orig_name = $hdr->{name};
            my $max_part_size = $max_file_size - tar_overhead($hdr->{name});

            my $parts = int(($full_size + $max_part_size - 1) / $max_part_size);
            print STDERR "# splitting $orig_name [$full_size bytes] into $parts parts\n" if ($opts{d});
            foreach my $subpart ( 1 .. $parts ) {
                new_tar_part();
                if ($subpart < $parts) {
                    $hdr->{size} = $max_part_size;
                } else {
                    # NOTE(review): when $full_size is an exact multiple
                    # of $max_part_size this yields 0 for the last part —
                    # confirm intended.
                    $hdr->{size} = $full_size % $max_part_size;
                }
                $hdr->{name} = $orig_name . '/' . $subpart;
                print STDERR "## creating part $subpart ",$hdr->{name}, " [", $hdr->{size}," bytes]\n";

                TarWriteFileInfo($fh, $hdr);
                my($data, $size);
                # NOTE(review): the whole-buffer read loop below is
                # disabled with if (0), so only the final partial
                # buffer ($hdr->{size} % $BufSize bytes) is read per
                # part — looks incomplete; confirm against r235 intent.
                if (0) {
                    for ( 1 .. int($hdr->{size} / $BufSize) ) {
                        my $r_size = $f->read(\$data, $BufSize);
                        die "expected $BufSize bytes read, got $r_size bytes!" if ($r_size != $BufSize);
                        TarWrite($fh, \$data);
                        $size += length($data);
                    }
                }
                my $size_left = $hdr->{size} % $BufSize;
                my $r_size = $f->read(\$data, $size_left);
                die "expected $size_left bytes last read, got $r_size bytes!" if ($r_size != $size_left);

                TarWrite($fh, \$data);
                $size += length($data);
                TarWritePad($fh, $size);

                $items_in_part++;
            }
            $f->close;
            $FileCnt++;
            $ByteCnt += $full_size;
            # rotate so following files start in a fresh part
            new_tar_part();
        }
    } elsif ( $hdr->{type} == BPC_FTYPE_HARDLINK ) {
        #
        # Hardlink file: either write a hardlink or the complete file
        # depending upon whether the linked-to file will be written
        # to the archive.
        #
        # Start by reading the contents of the link.
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        if ( !defined($f) ) {
            print(STDERR "Unable to open file $hdr->{fullPath}\n");
            $ErrorCnt++;
            return;
        }
        my $data;
        while ( $f->read(\$data, $BufSize) > 0 ) {
            $hdr->{linkname} .= $data;
        }
        $f->close;
        my $done = 0;
        my $name = $hdr->{linkname};
        $name =~ s{^\./}{/};
        if ( $HardLinkExtraFiles{$name} ) {
            #
            # Target file will be or was written, so just remember
            # the hardlink so we can dump it later.
            #
            push(@HardLinks, $hdr);
            $SpecialCnt++;
        } else {
            #
            # Have to dump the original file. Just call the top-level
            # routine, so that we save the hassle of dealing with
            # mangling, merging and attributes.
            #
            # NOTE(review): the key stored here is the raw linkname,
            # but the lookup above uses $name with "./" rewritten to
            # "/" — the two forms may never match; confirm.
            #
            $HardLinkExtraFiles{$hdr->{linkname}} = 1;
            archiveWrite($fh, $hdr->{linkname}, $hdr->{name});
        }
    } elsif ( $hdr->{type} == BPC_FTYPE_SYMLINK ) {
        #
        # Symbolic link: read the symbolic link contents into the header
        # and write the header.
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        if ( !defined($f) ) {
            print(STDERR "Unable to open symlink file $hdr->{fullPath}\n");
            $ErrorCnt++;
            return;
        }
        my $data;
        while ( $f->read(\$data, $BufSize) > 0 ) {
            $hdr->{linkname} .= $data;
        }
        $f->close;
        $hdr->{size} = 0;
        TarWriteFileInfo($fh, $hdr);
        $SpecialCnt++;
    } elsif ( $hdr->{type} == BPC_FTYPE_CHARDEV
           || $hdr->{type} == BPC_FTYPE_BLOCKDEV
           || $hdr->{type} == BPC_FTYPE_FIFO ) {
        #
        # Special files: for char and block special we read the
        # major and minor numbers from a plain file.
        #
        if ( $hdr->{type} != BPC_FTYPE_FIFO ) {
            my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0,
                                            $hdr->{compress});
            my $data;
            if ( !defined($f) || $f->read(\$data, $BufSize) < 0 ) {
                print(STDERR "Unable to open/read char/block special file"
                           . " $hdr->{fullPath}\n");
                $f->close if ( defined($f) );
                $ErrorCnt++;
                return;
            }
            $f->close;
            # stored contents are "major,minor" as decimal text
            if ( $data =~ /(\d+),(\d+)/ ) {
                $hdr->{devmajor} = $1;
                $hdr->{devminor} = $2;
            }
        }
        $hdr->{size} = 0;
        TarWriteFileInfo($fh, $hdr);
        $SpecialCnt++;
    } else {
        print(STDERR "Got unknown type $hdr->{type} for $hdr->{name}\n");
        $ErrorCnt++;
    }
}
783
my $t_fmt = '%Y-%m-%d %H:%M:%S';

# Return the current local time formatted as "YYYY-MM-DD HH:MM:SS".
sub curr_time {
    my @now = localtime();
    return strftime($t_fmt, @now);
}

Properties

Name Value
svn:executable *

  ViewVC Help
Powered by ViewVC 1.1.26