1 |
#!/usr/bin/perl |
2 |
#============================================================= -*-perl-*- |
3 |
# |
4 |
# BackupPC_tarIncCreate: create a tar archive of an existing incremental dump |
5 |
# |
6 |
# |
7 |
# DESCRIPTION |
8 |
# |
9 |
# Usage: BackupPC_tarIncCreate [options] files/directories... |
10 |
# |
11 |
# Flags: |
12 |
# Required options: |
13 |
# |
14 |
# -h host Host from which the tar archive is created. |
15 |
# -n dumpNum Dump number from which the tar archive is created. |
16 |
# A negative number means relative to the end (eg -1 |
17 |
# means the most recent dump, -2 2nd most recent etc). |
18 |
# -s shareName Share name from which the tar archive is created. |
19 |
# |
20 |
# Other options: |
21 |
# -t print summary totals |
22 |
# -r pathRemove path prefix that will be replaced with pathAdd |
23 |
# -p pathAdd new path prefix |
24 |
# -b BLOCKS BLOCKS x 512 bytes per record (default 20; same as tar) |
25 |
# -w writeBufSz write buffer size (default 1MB) |
26 |
# |
27 |
# The -h, -n and -s options specify which dump is used to generate |
28 |
# the tar archive. The -r and -p options can be used to relocate |
29 |
# the paths in the tar archive so extracted files can be placed |
30 |
# in a location different from their original location. |
31 |
# |
32 |
# AUTHOR |
33 |
# Craig Barratt <cbarratt@users.sourceforge.net> |
34 |
# |
35 |
# COPYRIGHT |
36 |
# Copyright (C) 2001-2003 Craig Barratt |
37 |
# |
38 |
# This program is free software; you can redistribute it and/or modify |
39 |
# it under the terms of the GNU General Public License as published by |
40 |
# the Free Software Foundation; either version 2 of the License, or |
41 |
# (at your option) any later version. |
42 |
# |
43 |
# This program is distributed in the hope that it will be useful, |
44 |
# but WITHOUT ANY WARRANTY; without even the implied warranty of |
45 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
46 |
# GNU General Public License for more details. |
47 |
# |
48 |
# You should have received a copy of the GNU General Public License |
49 |
# along with this program; if not, write to the Free Software |
50 |
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
51 |
# |
52 |
#======================================================================== |
53 |
# |
54 |
# Version 2.1.0, released 20 Jun 2004. |
55 |
# |
56 |
# See http://backuppc.sourceforge.net. |
57 |
# |
58 |
#======================================================================== |
59 |
|
60 |
use strict; |
61 |
no utf8; |
62 |
use lib "__INSTALLDIR__/lib"; |
63 |
use File::Path; |
64 |
use Getopt::Std; |
65 |
use DBI; |
66 |
use BackupPC::Lib; |
67 |
use BackupPC::Attrib qw(:all); |
68 |
use BackupPC::FileZIO; |
69 |
use BackupPC::View; |
70 |
use BackupPC::SearchLib; |
71 |
use Data::Dumper; |
72 |
|
73 |
#
# Initialise the BackupPC server library and load the global configuration.
#
die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
my $TopDir = $bpc->TopDir();    # top-level BackupPC data directory
my $BinDir = $bpc->BinDir();    # directory containing the BackupPC executables
my %Conf = $bpc->Conf();        # global configuration (SearchDSN, SearchUser, ...)
my @DBCache;                    # cache of search-DB results (see checkSubDirs)
my $db_done = 0;                # set once the search DB has been queried
my %opts;                       # command-line options filled in by getopts below
80 |
|
81 |
#
# Parse the command line; on bad options or no file/directory arguments,
# print usage and exit non-zero.
#
if ( !getopts("th:n:p:r:s:b:w:", \%opts) || @ARGV < 1 ) {
    print STDERR <<EOF;
usage: $0 [options] files/directories...
  Required options:
     -h host         host from which the tar archive is created
     -n dumpNum      dump number from which the tar archive is created
                     A negative number means relative to the end (eg -1
                     means the most recent dump, -2 2nd most recent etc).
     -s shareName    share name from which the tar archive is created

  Other options:
     -t              print summary totals
     -r pathRemove   path prefix that will be replaced with pathAdd
     -p pathAdd      new path prefix
     -b BLOCKS       BLOCKS x 512 bytes per record (default 20; same as tar)
     -w writeBufSz   write buffer size (default 1048576 = 1MB)
EOF
    exit(1);
}
100 |
|
101 |
#
# Validate the required -h (host) and -n (dump number) options.
#
if ( $opts{h} !~ /^([\w\.\s-]+)$/ ) {
    print(STDERR "$0: bad host name '$opts{h}'\n");
    exit(1);
}
my $Host = $opts{h};

if ( $opts{n} !~ /^(-?\d+)$/ ) {
    print(STDERR "$0: bad dump number '$opts{n}'\n");
    exit(1);
}
my $Num = $opts{n};

# Backup metadata for this host, plus running totals for the -t summary.
my @Backups = $bpc->BackupInfoRead($Host);
my $FileCnt = 0;        # regular files written
my $ByteCnt = 0;        # total file bytes written
my $DirCnt = 0;         # directories written
my $SpecialCnt = 0;     # symlinks, hardlinks, devices, fifos
my $ErrorCnt = 0;       # errors encountered
119 |
|
120 |
#
# Resolve a negative dump number (relative to the most recent dump) and
# verify that the requested dump exists for this host.
#
my $i;
$Num = $Backups[@Backups + $Num]{num} if ( -@Backups <= $Num && $Num < 0 );
for ( $i = 0 ; $i < @Backups ; $i++ ) {
    last if ( $Backups[$i]{num} == $Num );
}
if ( $i >= @Backups ) {
    print(STDERR "$0: bad backup number $Num for host $Host\n");
    exit(1);
}

#
# Optional path relocation: the -r prefix is replaced with the -p prefix
# in the generated archive.  Declaration is kept separate from the
# conditional assignment: the original "my $x = $1 if (...)" idiom
# leaves $x in an unspecified state when the condition is false.
#
my $PathRemove;
my $PathAdd;
$PathRemove = $1 if ( $opts{r} =~ /(.+)/ );
$PathAdd    = $1 if ( $opts{p} =~ /(.+)/ );

# Validate the share name; "*" means "all shares in the dump".
if ( $opts{s} !~ /^([\w\s\.\/\$-]+)$/ && $opts{s} ne "*" ) {
    print(STDERR "$0: bad share name '$opts{s}'\n");
    exit(1);
}
our $ShareName = $opts{s};
our $view = BackupPC::View->new($bpc, $Host, \@Backups);
138 |
|
139 |
#
# This constant and the line of code below that uses it are borrowed
# from Archive::Tar. Thanks to Calle Dybedahl and Stephen Zander.
# See www.cpan.org.
#
# Archive::Tar is Copyright 1997 Calle Dybedahl. All rights reserved.
# Copyright 1998 Stephen Zander. All rights reserved.
#
my $tar_pack_header
        = 'a100 a8 a8 a8 a12 a12 A8 a1 a100 a6 a2 a32 a32 a8 a8 a155 x12';
my $tar_header_length = 512;

my $BufSize    = $opts{w} || 1048576;   # chunk size for reading files; 1MB or 2^20
my $WriteBuf   = "";                    # pending output, flushed by TarWrite()
my $WriteBufSz = ($opts{b} || 20) * $tar_header_length;   # tar record size (-b)

my(%UidCache, %GidCache);               # memoized uid/gid -> name lookups
my(%HardLinkExtraFiles, @HardLinks);    # hardlink bookkeeping (see TarWriteFile)
157 |
|
158 |
#
# Write out all the requested files/directories
#
binmode(STDOUT);
my $fh = *STDOUT;
if ( $ShareName eq "*" ) {
    #
    # Share name "*" means archive every share in the dump; each share's
    # contents are rooted at /<shareName>/ in the output archive.
    #
    my $PathRemoveOrig = $PathRemove;
    my $PathAddOrig    = $PathAdd;
    foreach $ShareName ( $view->shareList($Num) ) {
        #print(STDERR "Doing share ($ShareName)\n");
        $PathRemove = "/" if ( !defined($PathRemoveOrig) );
        ($PathAdd = "/$ShareName/$PathAddOrig") =~ s{//+}{/}g;
        foreach my $dir ( @ARGV ) {
            archiveWrite($fh, $dir);
        }
        # Flush hardlinks per share so link targets resolve within it.
        archiveWriteHardLinks($fh);
    }
} else {
    foreach my $dir ( @ARGV ) {
        archiveWrite($fh, $dir);
    }
    archiveWriteHardLinks($fh);
}
181 |
|
182 |
#
# Finish with two null 512 byte headers, and then round out a full
# block.
#
my $data = "\0" x ($tar_header_length * 2);
TarWrite($fh, \$data);
TarWrite($fh, undef);   # undef = flush: pad the write buffer to a full record

#
# print out totals if requested
#
if ( $opts{t} ) {
    print STDERR "Done: $FileCnt files, $ByteCnt bytes, $DirCnt dirs,",
                 " $SpecialCnt specials, $ErrorCnt errors\n";
}
if ( $ErrorCnt && !$FileCnt && !$DirCnt ) {
    #
    # Got errors, with no files or directories; exit with non-zero
    # status
    #
    exit(1);
}
exit(0);
205 |
|
206 |
########################################################################### |
207 |
# Subroutines |
208 |
########################################################################### |
209 |
|
210 |
sub archiveWrite
{
    # Emit one requested file or directory tree from the current share.
    # $tarPathOverride, when supplied, is passed to TarWriteFile as the
    # path recorded inside the archive (used when expanding hardlinks).
    my($fh, $path, $tarPathOverride) = @_;

    # Refuse paths that try to escape the share via "..".
    if ( $path =~ m{(^|/)\.\.(/|$)} ) {
        print(STDERR "$0: bad directory '$path'\n");
        $ErrorCnt++;
        return;
    }

    $path = "/" if ( $path eq "." );
    #print(STDERR "calling find with $Num, $ShareName, $path\n");

    my $found = $view->find($Num, $ShareName, $path, 0, \&TarWriteFile,
                            $fh, $tarPathOverride);
    return if ( $found >= 0 );

    print(STDERR "$0: bad share or directory '$ShareName/$path'\n");
    $ErrorCnt++;
    return;
}
229 |
|
230 |
#
# Write out any hardlinks (if any)
#
sub archiveWriteHardLinks
{
    # Bug fix: the original "my $fh = @_;" evaluated @_ in scalar
    # context, assigning the argument *count* (1) instead of the output
    # filehandle, so every hardlink header write failed.  List-context
    # unpacking gets the actual handle.
    my($fh) = @_;
    foreach my $hdr ( @HardLinks ) {
        $hdr->{size} = 0;
        # Apply the same -r/-p relocation to the link target that
        # TarWriteFile applied to the entry names ("." prefix included).
        if ( defined($PathRemove)
              && substr($hdr->{linkname}, 0, length($PathRemove)+1)
                        eq ".$PathRemove" ) {
            substr($hdr->{linkname}, 0, length($PathRemove)+1) = ".$PathAdd";
        }
        TarWriteFileInfo($fh, $hdr);
    }
    # Reset per-share hardlink state for the next share (if any).
    @HardLinks = ();
    %HardLinkExtraFiles = ();
}
248 |
|
249 |
sub UidLookup
{
    # Map a numeric uid to a user name, memoized in %UidCache.  A failed
    # lookup is cached too, so getpwuid() runs at most once per uid.
    my($uid) = @_;

    unless ( exists($UidCache{$uid}) ) {
        my($name) = getpwuid($uid);
        $UidCache{$uid} = $name;
    }
    return $UidCache{$uid};
}
256 |
|
257 |
sub GidLookup
{
    # Map a numeric gid to a group name, memoized in %GidCache.  A failed
    # lookup is cached too, so getgrgid() runs at most once per gid.
    my($gid) = @_;

    unless ( exists($GidCache{$gid}) ) {
        my($name) = getgrgid($gid);
        $GidCache{$gid} = $name;
    }
    return $GidCache{$gid};
}
264 |
|
265 |
sub TarWrite
{
    # Buffered tar output: data accumulates in $WriteBuf and is flushed
    # to $fh only in exact multiples of $WriteBufSz (the tar record
    # size).  Passing undef as $dataRef flushes the final record,
    # NUL-padded to a full $WriteBufSz.
    my($fh, $dataRef) = @_;

    if ( !defined($dataRef) ) {
        #
        # do flush by padding to a full $WriteBufSz
        #
        my $data = "\0" x ($WriteBufSz - length($WriteBuf));
        $dataRef = \$data;
    }
    if ( length($WriteBuf) + length($$dataRef) < $WriteBufSz ) {
        #
        # just buffer and return
        #
        $WriteBuf .= $$dataRef;
        return;
    }
    # Write the first full record: pending buffer + the head of the new
    # data.  Any write that is not exactly $WriteBufSz bytes is fatal.
    my $done = $WriteBufSz - length($WriteBuf);
    if ( syswrite($fh, $WriteBuf . substr($$dataRef, 0, $done))
                            != $WriteBufSz ) {
        print(STDERR "Unable to write to output file ($!)\n");
        exit(1);
    }
    # Write any further complete records contained in the new data.
    while ( $done + $WriteBufSz <= length($$dataRef) ) {
        if ( syswrite($fh, substr($$dataRef, $done, $WriteBufSz))
                        != $WriteBufSz ) {
            print(STDERR "Unable to write to output file ($!)\n");
            exit(1);
        }
        $done += $WriteBufSz;
    }
    # Keep the remainder buffered for the next call.
    $WriteBuf = substr($$dataRef, $done);
}
299 |
|
300 |
sub TarWritePad
{
    # Pad the archive with NUL bytes so an entry of $size data bytes
    # ends on a 512-byte tar header boundary.
    my($fh, $size) = @_;

    my $partial = $size % $tar_header_length;
    return unless $partial;

    my $pad = "\0" x ($tar_header_length - $partial);
    TarWrite($fh, \$pad);
}
309 |
|
310 |
sub TarWriteHeader
{
    # Pack one 512-byte ustar header from %$hdr and write it.  Numeric
    # fields are rendered in octal; the checksum is computed over the
    # header with the checksum field space-filled (pack "A8" space-pads),
    # as the tar format requires.
    my($fh, $hdr) = @_;

    # Resolve uid/gid to names unless the caller supplied them.
    $hdr->{uname} = UidLookup($hdr->{uid}) if ( !defined($hdr->{uname}) );
    $hdr->{gname} = GidLookup($hdr->{gid}) if ( !defined($hdr->{gname}) );
    my $devmajor = defined($hdr->{devmajor}) ? sprintf("%07o", $hdr->{devmajor})
                                             : "";
    my $devminor = defined($hdr->{devminor}) ? sprintf("%07o", $hdr->{devminor})
                                             : "";
    my $sizeStr;
    if ( $hdr->{size} >= 2 * 65536 * 65536 ) {
        #
        # GNU extension for files >= 8GB: send size in big-endian binary
        #
        $sizeStr = pack("c4 N N", 0x80, 0, 0, 0,
                                  $hdr->{size} / (65536 * 65536),
                                  $hdr->{size} % (65536 * 65536));
    } elsif ( $hdr->{size} >= 1 * 65536 * 65536 ) {
        #
        # sprintf octal only handles up to 2^32 - 1
        #
        $sizeStr = sprintf("%03o", $hdr->{size} / (1 << 24))
                 . sprintf("%08o", $hdr->{size} % (1 << 24));
    } else {
        $sizeStr = sprintf("%011o", $hdr->{size});
    }
    my $data = pack($tar_pack_header,
                     substr($hdr->{name}, 0, 99),     # name fields truncate at 99
                     sprintf("%07o", $hdr->{mode}),
                     sprintf("%07o", $hdr->{uid}),
                     sprintf("%07o", $hdr->{gid}),
                     $sizeStr,
                     sprintf("%011o", $hdr->{mtime}),
                     "",        #checksum field - space padded by pack("A8")
                     $hdr->{type},
                     substr($hdr->{linkname}, 0, 99),
                     $hdr->{magic} || 'ustar ',
                     $hdr->{version} || ' ',
                     $hdr->{uname},
                     $hdr->{gname},
                     $devmajor,
                     $devminor,
                     ""                               # prefix is empty
                );
    # Fill in the checksum: 6-digit octal, NUL, space (offset 148).
    substr($data, 148, 7) = sprintf("%06o\0", unpack("%16C*",$data));
    TarWrite($fh, \$data);
}
358 |
|
359 |
sub TarWriteFileInfo
{
    # Write the tar header for one entry, preceded by GNU @LongLink
    # pseudo-entries when the link target or file name exceeds the
    # 99-byte ustar header fields.
    my($fh, $hdr) = @_;

    # Long symbolic-link target first, then long file name, matching
    # the order GNU tar expects.
    TarWriteLongLink($fh, "K", $hdr->{linkname})
                        if ( length($hdr->{linkname}) > 99 );
    TarWriteLongLink($fh, "L", $hdr->{name})
                        if ( length($hdr->{name}) > 99 );
    TarWriteHeader($fh, $hdr);
}

#
# Emit one GNU @LongLink pseudo-entry of the given type ("K" for a long
# link target, "L" for a long file name) carrying $path as its data.
#
sub TarWriteLongLink
{
    my($fh, $type, $path) = @_;

    my $data = $path . "\0";
    my %h = (
        name => "././\@LongLink",
        type => $type,
        size => length($data),
    );
    TarWriteHeader($fh, \%h);
    TarWrite($fh, \$data);
    TarWritePad($fh, length($data));
}
391 |
|
392 |
#
# returns 1 if a given directory has files somewhere under it
# in a given dump of a given share
#
sub checkSubDirs($$$$) {
    my ($dir, $share, $host, $dumpNo) = @_;
    my $dsn     = $Conf{SearchDSN};
    my $db_user = $Conf{SearchUser} || '';

    #
    # Normalise the directory to the form stored in the database: strip
    # a leading ".", a leading "/" and a trailing "/".  Bug fix: the
    # original compared characters with the numeric "==" operator, which
    # evaluates both sides as numbers (0 == 0) and so matched almost
    # always; "eq" is the correct string comparison.
    #
    $dir = substr($dir, 1) if ( substr($dir, 0, 1) eq '.' );
    $dir = substr($dir, 1) if ( substr($dir, 0, 1) eq '/' );
    $dir = substr($dir, 0, length($dir) - 1)
                        if ( substr($dir, length($dir) - 1, 1) eq '/' );

    #
    # On the first call only, run one query that finds every directory
    # in this dump that has files below it, and cache the result keyed
    # by host, share and directory name.  Bug fix: the original cached
    # into an *array* indexed by these strings, which Perl coerces to
    # index 0, collapsing all keys together; a nested hash keeps them
    # distinct.  ("our" gives the cache package scope so it persists
    # across calls under "use strict".)
    #
    our %DBCacheHash;
    if ( !$db_done ) {
        print STDERR "doing db...";
        my $search_sql = q{
            SELECT hosts.name, shares.name, startfiles.name, COUNT(files.*) AS subfiles
            FROM files startfiles
            INNER JOIN shares ON (shares.id=startfiles.shareid)
            INNER JOIN hosts ON (hosts.id=shares.hostid)
            INNER JOIN backups ON (
                backups.num=startfiles.backupnum AND
                backups.hostid=hosts.id AND backups.shareid=shares.id
            )
            LEFT JOIN files ON (
                files.backupnum=startfiles.backupnum AND
                files.shareid=startfiles.shareid AND
                files.path LIKE startfiles.path || '/%' AND
                files.type<>startfiles.type AND
                files.id <> startfiles.id
            )
            WHERE
                hosts.name=? AND
                shares.name=? AND
                startfiles.type=? AND
                startfiles.backupnum=?
            GROUP BY hosts.name, shares.name, startfiles.name, startfiles.backupnum;
        };
        my $dbh = DBI->connect($dsn, $db_user, "",
                               { RaiseError => 1, AutoCommit => 1 });
        my $sth = $dbh->prepare($search_sql);
        $sth->execute($host, $share, BPC_FTYPE_DIR, $dumpNo);
        print STDERR "done\n";
        while ( my @r_data = $sth->fetchrow_array() ) {
            $DBCacheHash{$r_data[0]}{$r_data[1]}{$r_data[2]} = 1;
        }
        $sth->finish();
        $dbh->disconnect();
        $db_done = 1;
    }

    # Bug fix: the original tested "!= undef", a numeric comparison that
    # is true for any non-zero value and warns; defined() is the correct
    # presence test.
    return 1 if ( defined($DBCacheHash{$host}{$share}{$dir})
                        && $DBCacheHash{$host}{$share}{$dir} == 1 );
    return 0;
}
468 |
|
469 |
# Attribute caches (unused in the code visible here; presumably retained
# for parity with BackupPC_tarCreate — TODO confirm before removing).
my $Attr;
my $AttrDir;
471 |
|
472 |
sub TarWriteFile
{
    # BackupPC::View::find() callback: write one filesystem entry (any
    # type) to the tar stream, updating the global counters.  Hardlinks
    # whose target is also being dumped are deferred onto @HardLinks for
    # archiveWriteHardLinks() to emit later.
    my($hdr, $fh, $tarPathOverride) = @_;


    my $tarPath = $hdr->{relPath};
    $tarPath = $tarPathOverride if ( defined($tarPathOverride) );

    # Apply the -r/-p relocation and make the archive name "./"-relative.
    $tarPath =~ s{//+}{/}g;
    if ( defined($PathRemove)
            && substr($tarPath, 0, length($PathRemove)) eq $PathRemove ) {
        substr($tarPath, 0, length($PathRemove)) = $PathAdd;
    }
    $tarPath = "./" . $tarPath if ( $tarPath !~ /^\.\// );
    $tarPath =~ s{//+}{/}g;
    $hdr->{name} = $tarPath;

    if ( $hdr->{type} == BPC_FTYPE_DIR ) {
        #
        # Directory: just write the header
        #


        $hdr->{name} .= "/" if ( $hdr->{name} !~ m{/$} );
        # check if it has files under it in the database; directories
        # with nothing below them are skipped in this incremental archive
        if ( checkSubDirs($hdr->{path}, $ShareName, $Host, $Num) != 0 )
        {
            TarWriteFileInfo($fh, $hdr);
            $DirCnt++;
        }
    } elsif ( $hdr->{type} == BPC_FTYPE_FILE ) {
        #
        # Regular file: write the header and file
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        if ( !defined($f) ) {
            print(STDERR "Unable to open file $hdr->{fullPath}\n");
            $ErrorCnt++;
            return;
        }
        TarWriteFileInfo($fh, $hdr);
        # NOTE(review): $size stays undef for a zero-length file; the
        # arithmetic below then treats it as 0 (no "use warnings" here).
        my($data, $size);
        while ( $f->read(\$data, $BufSize) > 0 ) {
            TarWrite($fh, \$data);
            $size += length($data);
        }
        $f->close;
        TarWritePad($fh, $size);
        $FileCnt++;
        $ByteCnt += $size;
    } elsif ( $hdr->{type} == BPC_FTYPE_HARDLINK ) {
        #
        # Hardlink file: either write a hardlink or the complete file
        # depending upon whether the linked-to file will be written
        # to the archive.
        #
        # Start by reading the contents of the link.
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        if ( !defined($f) ) {
            print(STDERR "Unable to open file $hdr->{fullPath}\n");
            $ErrorCnt++;
            return;
        }
        my $data;
        while ( $f->read(\$data, $BufSize) > 0 ) {
            $hdr->{linkname} .= $data;
        }
        $f->close;
        #
        # Check @ARGV and the list of hardlinked files we have explicity
        # dumped to see if we have dumped this file or not
        #
        my $done = 0;
        my $name = $hdr->{linkname};
        $name =~ s{^\./}{/};
        if ( $HardLinkExtraFiles{$name} ) {
            $done = 1;
        } else {
            foreach my $arg ( @ARGV ) {
                $arg =~ s{^\./+}{/};
                $arg =~ s{/+$}{};
                $done = 1 if ( $name eq $arg || $name =~ /^\Q$arg\// );
            }
        }
        if ( $done ) {
            #
            # Target file will be or was written, so just remember
            # the hardlink so we can dump it later.
            #
            push(@HardLinks, $hdr);
            $SpecialCnt++;
        } else {
            #
            # Have to dump the original file.  Just call the top-level
            # routine, so that we save the hassle of dealing with
            # mangling, merging and attributes.
            #
            # NOTE(review): the key stored here is the raw linkname,
            # while the lookup above uses $name with a leading "./"
            # rewritten to "/" — these look like they can disagree;
            # verify against the attribute file format.
            #
            $HardLinkExtraFiles{$hdr->{linkname}} = 1;
            archiveWrite($fh, $hdr->{linkname}, $hdr->{name});
        }
    } elsif ( $hdr->{type} == BPC_FTYPE_SYMLINK ) {
        #
        # Symbolic link: read the symbolic link contents into the header
        # and write the header.
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        if ( !defined($f) ) {
            print(STDERR "Unable to open symlink file $hdr->{fullPath}\n");
            $ErrorCnt++;
            return;
        }
        my $data;
        while ( $f->read(\$data, $BufSize) > 0 ) {
            $hdr->{linkname} .= $data;
        }
        $f->close;
        $hdr->{size} = 0;
        TarWriteFileInfo($fh, $hdr);
        $SpecialCnt++;
    } elsif ( $hdr->{type} == BPC_FTYPE_CHARDEV
           || $hdr->{type} == BPC_FTYPE_BLOCKDEV
           || $hdr->{type} == BPC_FTYPE_FIFO ) {
        #
        # Special files: for char and block special we read the
        # major and minor numbers from a plain file.
        #
        if ( $hdr->{type} != BPC_FTYPE_FIFO ) {
            my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0,
                                            $hdr->{compress});
            my $data;
            if ( !defined($f) || $f->read(\$data, $BufSize) < 0 ) {
                print(STDERR "Unable to open/read char/block special file"
                           . " $hdr->{fullPath}\n");
                $f->close if ( defined($f) );
                $ErrorCnt++;
                return;
            }
            $f->close;
            # Stored as "major,minor" text inside the backup file.
            if ( $data =~ /(\d+),(\d+)/ ) {
                $hdr->{devmajor} = $1;
                $hdr->{devminor} = $2;
            }
        }
        $hdr->{size} = 0;
        TarWriteFileInfo($fh, $hdr);
        $SpecialCnt++;
    } else {
        print(STDERR "Got unknown type $hdr->{type} for $hdr->{name}\n");
        $ErrorCnt++;
    }
}
624 |
|