#!/usr/bin/perl -w
#============================================================= -*-perl-*-
#
# BackupPC_tarIncCreate: create a tar archive of an existing incremental dump
#
#
# DESCRIPTION
#
#   Usage: BackupPC_tarIncCreate [options]
#
#   Flags:
#     Required options:
#
#       -h host         host from which the tar archive is created
#       -n dumpNum      dump number from which the tar archive is created
#       -s shareName    share name from which the tar archive is created
#
#     Other options:
#       -t              print summary totals
#       -r pathRemove   path prefix that will be replaced with pathAdd
#       -p pathAdd      new path prefix
#       -b BLOCKS       BLOCKS x 512 bytes per record (default 20; same as tar)
#       -w writeBufSz   write buffer size (default 1048576 = 1MB)
#       -f              overwrite existing parts
#       -v              verbose output
#       -d              debug output
#
# AUTHOR
#   Craig Barratt <cbarratt@users.sourceforge.net>
#   Ivan Klaric <iklaric@gmail.com>
#   Dobrica Pavlinusic <dpavlin@rot13.org>
#
# COPYRIGHT
#   Copyright (C) 2001-2003 Craig Barratt
#

use strict;
use lib "/usr/local/BackupPC/lib";     # adjust to the local BackupPC install

use Getopt::Std;
use DBI;

use BackupPC::Lib;
use BackupPC::Attrib qw(:all);
use BackupPC::FileZIO;
use BackupPC::View;
use BackupPC::SearchLib;

use Time::HiRes qw/time/;
use POSIX qw/strftime/;
use File::Which;
use File::Path;
use File::Slurp;
use Data::Dumper;     ### FIXME

die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) ); |
die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) ); |
81 |
my $TopDir = $bpc->TopDir(); |
my $TopDir = $bpc->TopDir(); |
82 |
my $BinDir = $bpc->BinDir(); |
my $BinDir = $bpc->BinDir(); |
83 |
my %Conf = $bpc->Conf(); |
my %Conf = $bpc->Conf(); |
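
# make the BackupPC configuration visible to BackupPC::SearchLib
# (used below for getGzipName() and the Search* database settings)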
%BackupPC::SearchLib::Conf = %Conf;

my %opts;
my $in_backup_increment;

if ( !getopts("th:n:p:r:s:b:w:vdf", \%opts) ) {
    print STDERR <<EOF;
usage: $0 [options]
  Required options:
     -h host         host from which the tar archive is created
     -n dumpNum      dump number from which the tar archive is created
                     A negative number means relative to the end (eg -1
                     means the most recent dump, -2 2nd most recent)
     -s shareName    share name from which the tar archive is created

  Other options:
     -t              print summary totals
     -r pathRemove   path prefix that will be replaced with pathAdd
     -p pathAdd      new path prefix
     -b BLOCKS       BLOCKS x 512 bytes per record (default 20; same as tar)
     -w writeBufSz   write buffer size (default 1048576 = 1MB)
     -f              overwrite existing parts
     -v              verbose output
     -d              debug output
EOF
    exit(1);
}
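
# example (hypothetical host/share): build parts for the most recent
# increment of share /home on host "foo", overwriting any existing parts:
#
#   BackupPC_tarIncCreate -h foo -s /home -n -1 -f -v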

if ( $opts{h} !~ /^([\w\.\s-]+)$/ ) {
    die "$0: bad host name '$opts{h}'\n";
}
my $Host = $opts{h};

if ( $opts{n} !~ /^(-?\d+)$/ ) {
    die "$0: bad dump number '$opts{n}'\n";
}
my $Num = $opts{n};

my $bin;
foreach my $c (qw/gzip md5sum tee/) {
    $bin->{$c} = which($c) || die "$0 needs $c, install it\n";
}

my @Backups    = $bpc->BackupInfoRead($Host);
my $FileCnt    = 0;
my $ByteCnt    = 0;
my $DirCnt     = 0;
my $SpecialCnt = 0;
my $ErrorCnt   = 0;
my $current_tar_size = 0;
my $total_increment_size = 0;

my $i;
$Num = $Backups[@Backups + $Num]{num} if ( -@Backups <= $Num && $Num < 0 );
for ( $i = 0 ; $i < @Backups ; $i++ ) {
    last if ( $Backups[$i]{num} == $Num );
}
if ( $i >= @Backups ) {
    die "$0: bad backup number $Num for host $Host\n";
}

my $PathRemove = $1 if ( $opts{r} =~ /(.+)/ );
my $PathAdd    = $1 if ( $opts{p} =~ /(.+)/ );
if ( $opts{s} !~ /^([\w\s\.\/\$-]+)$/ && $opts{s} ne "*" ) {
    die "$0: bad share name '$opts{s}'\n";
}
our $ShareName = $opts{s};
our $view = BackupPC::View->new($bpc, $Host, \@Backups);

#
# database
#
my $dsn     = $Conf{SearchDSN};
my $db_user = $Conf{SearchUser} || '';

my $dbh = DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 0 });

my $sth_inc_size = $dbh->prepare(qq{
    update backups set
        inc_size = ?,
        inc_deleted = false
    where id = ?
});

my $sth_backup_parts = $dbh->prepare(qq{
    insert into backup_parts (
        backup_id,
        part_nr,
        tar_size,
        size,
        md5,
        items
    ) values (?,?,?,?,?,?)
});
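
# each row in backup_parts describes one archive part: its number, the
# uncompressed tar size, the compressed size on disk, the md5 digest of
# the compressed stream, and how many items it holds; backups.inc_size
# accumulates the compressed grand total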

#
# This constant and the line of code below that uses it are borrowed
# from Archive::Tar.  Thanks to Calle Dybedahl and Stephen Zander.
# See www.cpan.org.
#
# Archive::Tar is Copyright 1997 Calle Dybedahl. All rights reserved.
#                 Copyright 1998 Stephen Zander. All rights reserved.
#
my $tar_pack_header
    = 'a100 a8 a8 a8 a12 a12 A8 a1 a100 a6 a2 a32 a32 a8 a8 a155 x12';
my $tar_header_length = 512;

my $BufSize    = $opts{w} || 1048576;     # 1MB or 2^20
my $WriteBuf   = "";
my $WriteBufSz = ($opts{b} || 20) * $tar_header_length;

my(%UidCache, %GidCache);
my(%HardLinkExtraFiles, @HardLinks);

#
# Write out all the requested files/directories
#
my $max_file_size = $Conf{'MaxArchiveFileSize'} || die "problem with MaxArchiveFileSize parameter";

my $tar_dir = $Conf{InstallDir} . '/' . $Conf{GzipTempDir};
die "problem with $tar_dir, check GzipTempDir in configuration\n"
    unless (-d $tar_dir && -w $tar_dir);

my $tar_file = BackupPC::SearchLib::getGzipName($Host, $ShareName, $Num)
    || die "can't getGzipName($Host, $ShareName, $Num)";

my $tar_path_final = $tar_dir . '/' . $tar_file;
my $tar_path = $tar_path_final . '.tmp';

$tar_path =~ s#//#/#g;

my $sth = $dbh->prepare(qq{
    SELECT backups.id
    FROM backups
    JOIN shares on shares.id = shareid
    JOIN hosts on hosts.id = shares.hostid
    WHERE hosts.name = ? and shares.name = ? and backups.num = ?
});
$sth->execute($Host, $ShareName, $Num);
my ($backup_id) = $sth->fetchrow_array;
$sth->finish;
die "can't find backup $Host:$ShareName #$Num in database\n" unless (defined $backup_id);

# delete existing backup_parts
my $sth_delete_backup_parts = $dbh->prepare(qq{
    delete from backup_parts
    where backup_id = ?
});
$sth_delete_backup_parts->execute($backup_id);

print STDERR "backup_id: $backup_id working dir: $tar_dir, max uncompressed size $max_file_size bytes, tar $tar_file\n"
    if ($opts{d});

if (-e $tar_path_final) {
    if ($opts{f}) {
        rmtree($tar_path_final) || die "can't remove $tar_path_final: $!";
    } else {
        die "$tar_path_final already exists\n";
    }
}
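
# parts are written into the temp dir $tar_path as <n>.tar.gz + <n>.md5;
# when finished, a single-part archive is renamed to <final>.tar.gz and
# <final>.md5, while a multi-part archive keeps the whole directory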

my $fh;
my $part = 0;
my $no_files = 0;
my $items_in_part = 0;

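#
# rotate to a new archive part: close the current gzip/tee/md5sum pipe (if
# any) and register the finished part in backup_parts; with close => 1 it
# only finishes up and moves the result into its final place
#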
sub new_tar_part {
    my $arg = {@_};

    if ($fh) {
        return if ($current_tar_size == 0);

        print STDERR "\n\t+ $part:";

        #
        # Finish with two null 512 byte headers,
        # and then round out a full block.
        #
        my $data = "\0" x ($tar_header_length * 2);
        TarWrite($fh, \$data);
        TarWrite($fh, undef);

        close($fh) || die "can't close archive part $part: $!";

        my $file = $tar_path . '/' . $part;

        my $md5 = read_file( $file . '.md5' ) || die "can't read md5sum file ${file}.md5";
        $md5 =~ s/\s.*$//;

        my $size = (stat( $file . '.tar.gz' ))[7] || die "can't stat ${file}.tar.gz";

        print "$file, $size bytes, $items_in_part items";

        $sth_backup_parts->execute(
            $backup_id,
            $part,
            $current_tar_size,
            $size,
            $md5,
            $items_in_part,
        );

        $total_increment_size += $size;

|
if ($arg->{close}) { |
287 |
|
|
288 |
|
sub move($$) { |
289 |
|
my ($from,$to) = @_; |
290 |
|
print STDERR "# rename $from -> $to\n" if ($opts{d}); |
291 |
|
rename $from, $to || die "can't move $from -> $to: $!\n"; |
292 |
|
} |
293 |
|
|
294 |
|
if ($part == 1) { |
295 |
|
print STDERR " single" if ($opts{v}); |
296 |
|
move("${tar_path}/1.tar.gz", "${tar_path_final}.tar.gz"); |
297 |
|
move("${tar_path}/1.md5", "${tar_path_final}.md5"); |
298 |
|
rmtree $tar_path or die "can't remove temporary dir $tar_path: $!"; |
299 |
|
} else { |
300 |
|
print STDERR " [last]" if ($opts{v}); |
301 |
|
move("${tar_path}", "${tar_path_final}"); |
302 |
|
|
303 |
|
# if this archive was single part, remove it |
304 |
|
foreach my $suffix (qw/.tar.gz .md5/) { |
305 |
|
my $path = $tar_path_final . $suffix; |
306 |
|
unlink $path if (-e $path); |
307 |
|
} |
308 |
|
} |
309 |
|
|
310 |
|
$sth_inc_size->execute( |
311 |
|
$total_increment_size, |
312 |
|
$backup_id |
313 |
|
); |
314 |
|
|
315 |
|
print "\n\ttotal $total_increment_size bytes"; |
316 |
|
|
317 |
|
return; |
318 |
|
} |
319 |
|
|
320 |
|
} |

    $part++;

    # if this is the first part, create the working directory
    if ($part == 1) {
        if (-e $tar_path) {
            print STDERR "# deleting existing $tar_path\n" if ($opts{d});
            rmtree($tar_path);
        }
        mkdir($tar_path) || die "can't create directory $tar_path: $!";

        sub abort_cleanup {
            print STDERR "ABORTED: cleaning up temp dir $tar_path\n";
            rmtree($tar_path);
            $dbh->rollback;
            exit 1;
        }

        $SIG{'INT'}     = \&abort_cleanup;
        $SIG{'QUIT'}    = \&abort_cleanup;
        $SIG{'__DIE__'} = \&abort_cleanup;

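        # from here on any die() runs abort_cleanup: the temp dir is
        # removed and the database transaction is rolled back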
    }

    my $file = $tar_path . '/' . $part;

    #
    # create complex pipe which will pass output through gzip
    # for compression, create file on disk using tee
    # and pipe same output to md5sum to create checksum
    #
    my $cmd = '| ' . $bin->{'gzip'}   . ' ' . $Conf{GzipLevel} . ' ' .
              '| ' . $bin->{'tee'}    . ' ' . $file . '.tar.gz' . ' ' .
              '| ' . $bin->{'md5sum'} . ' - > ' . $file . '.md5';

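    # the assembled command looks something like this (illustrative paths;
    # the gzip flags come from $Conf{GzipLevel}):
    #   | /bin/gzip -9 | /usr/bin/tee .../1.tar.gz | /usr/bin/md5sum - > .../1.md5
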
    print STDERR "## $cmd\n" if ($opts{d});

    open($fh, $cmd) or die "can't open $cmd: $!";
    binmode($fh);

    $current_tar_size = 0;
    $items_in_part    = 0;
}

new_tar_part();

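#
# seedCache() loads the path -> size map of every file recorded for this
# increment; TarWriteFile() skips anything not in that map, so walking the
# whole share below emits only the files of this increment
#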
if (seedCache($Host, $ShareName, $Num)) {
    archiveWrite($fh, '/');
    archiveWriteHardLinks($fh);
    new_tar_part( close => 1 );
} else {
    print STDERR "NOTE: no files found for $Host:$ShareName, increment $Num\n" if ($opts{v});

    # remove temporary files if there are no files
    rmtree($tar_path);

    my $sth = $dbh->prepare(qq{
        update backups set inc_size = 0, inc_deleted = true
        where id = ?
    });
    $sth->execute($backup_id);
}

#
# print out totals if requested
#
if ( $opts{t} ) {
    print STDERR "Done: $FileCnt files, $ByteCnt bytes, $DirCnt dirs, "
               . "$SpecialCnt specials, $ErrorCnt errors\n";
}
if ( $ErrorCnt && !$FileCnt && !$DirCnt ) {
    #
    # Got errors, with no files or directories; exit with non-zero
    # status
    #
    die "got errors or no files\n";
}

$sth_inc_size->finish;
$sth_backup_parts->finish;

$dbh->commit || die "can't commit changes to database";
$dbh->disconnect();

exit;

###########################################################################
# Subroutines
###########################################################################
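
#
# archiveWrite() and archiveWriteHardLinks() follow their BackupPC_tarCreate
# counterparts; a minimal sketch, assuming the stock BackupPC::View::find()
# callback interface:
#
sub archiveWrite
{
    my($fh, $dir, $tarPathOverride) = @_;

    if ( $dir =~ m{(^|/)\.\.(/|$)} ) {
        print(STDERR "$0: bad directory '$dir'\n");
        $ErrorCnt++;
        return;
    }
    $dir = "/" if ( $dir eq "." );
    if ( $view->find($Num, $ShareName, $dir, 0, \&TarWriteFile,
                     $fh, $tarPathOverride) < 0 ) {
        print(STDERR "$0: bad share or directory '$ShareName/$dir'\n");
        $ErrorCnt++;
        return;
    }
}

#
# Write out any hardlinks (if any)
#
sub archiveWriteHardLinks
{
    my($fh) = @_;
    foreach my $hdr ( @HardLinks ) {
        $hdr->{size} = 0;
        if ( defined($PathRemove)
              && substr($hdr->{linkname}, 0, length($PathRemove)+1)
                        eq ".$PathRemove" ) {
            substr($hdr->{linkname}, 0, length($PathRemove)+1) = ".$PathAdd";
        }
        TarWriteFileInfo($fh, $hdr);
    }
    @HardLinks = ();
    %HardLinkExtraFiles = ();
}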
sub TarWrite
{
    my($fh, $dataRef) = @_;

    if ( !defined($dataRef) ) {
        #
        # do flush by padding to a full $WriteBufSz
        #
        my $data = "\0" x ($WriteBufSz - length($WriteBuf));
        $dataRef = \$data;
    }

    # poor man's tell :-)
    $current_tar_size += length($$dataRef);

    if ( length($WriteBuf) + length($$dataRef) < $WriteBufSz ) {
        #
        # just buffer and return
        #
        $WriteBuf .= $$dataRef;
        return;
    }
    my $done = $WriteBufSz - length($WriteBuf);
    if ( syswrite($fh, $WriteBuf . substr($$dataRef, 0, $done))
                            != $WriteBufSz ) {
        die "Unable to write to output file ($!)\n";
    }
    while ( $done + $WriteBufSz <= length($$dataRef) ) {
        if ( syswrite($fh, substr($$dataRef, $done, $WriteBufSz))
                            != $WriteBufSz ) {
            die "Unable to write to output file ($!)\n";
        }
        $done += $WriteBufSz;
    }
    #
    # keep the unwritten remainder buffered for the next call
    #
    $WriteBuf = substr($$dataRef, $done);
}

#
# seed the cache of files in this increment
# (maps each file path to its size, as recorded in the database)
#
sub seedCache($$$) {
    my ($host, $share, $dumpNo) = @_;

    print STDERR curr_time(), " $host:$share #$dumpNo" if ($opts{v});
    my $sql = q{
        SELECT path,size
        FROM files
        JOIN shares on shares.id = shareid
        JOIN hosts on hosts.id = shares.hostid
        WHERE hosts.name = ? and shares.name = ? and backupnum = ?
    };

    my $sth = $dbh->prepare($sql);
    $sth->execute($host, $share, $dumpNo);
    my $count = $sth->rows;
    print STDERR " $count items, parts:" if ($opts{v});
    while (my $row = $sth->fetchrow_arrayref) {
        #print STDERR "+ ", $row->[0],"\n";
        $in_backup_increment->{ $row->[0] } = $row->[1];
    }

    $sth->finish();

    return $count;
}

#
# calculate overhead for one file in tar
#
sub tar_overhead($) {
    my $name = shift || '';

    # header, padding of file and two null blocks at end
    my $len = 4 * $tar_header_length;

    # if filename is longer than 99 chars, add blocks for
    # the long filename
    if ( length($name) > 99 ) {
        $len += int( ( length($name) + $tar_header_length ) / $tar_header_length ) * $tar_header_length;
    }

    return $len;
}
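
# e.g. a file with a 120-character name is budgeted 4*512 = 2048 bytes of
# fixed overhead plus int((120+512)/512)*512 = 512 bytes for the long name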

my $Attr;
my $AttrDir;

sub TarWriteFile
{
    my($hdr, $fh, $tarPathOverride) = @_;

    my $tarPath = $hdr->{relPath};
    $tarPath = $tarPathOverride if ( defined($tarPathOverride) );

    $tarPath =~ s{//+}{/}g;

    #print STDERR "? $tarPath\n" if ($opts{d});
    my $size = $in_backup_increment->{$tarPath};
    return unless (defined($size));

    # is this file too large to fit into MaxArchiveFileSize?
    my $overhead = tar_overhead($tarPath);
    if ( ($current_tar_size + $overhead + $size) > $max_file_size ) {
        print STDERR "# tar file $current_tar_size + $overhead + $size > $max_file_size, splitting\n" if ($opts{d});
        new_tar_part();
    }

    #print STDERR "A $tarPath [$size] tell: $current_tar_size\n" if ($opts{d});
    $items_in_part++;

    if ( defined($PathRemove)
            && substr($tarPath, 0, length($PathRemove)) eq $PathRemove ) {
        substr($tarPath, 0, length($PathRemove)) = $PathAdd;
    }
    $tarPath = "./" . $tarPath if ( $tarPath !~ /^\.\// );
    $tarPath =~ s{//+}{/}g;
    $hdr->{name} = $tarPath;

    if ( $hdr->{type} == BPC_FTYPE_DIR ) {
        #
        # Directory: just write the header
        #
        $hdr->{name} .= "/" if ( $hdr->{name} !~ m{/$} );
        TarWriteFileInfo($fh, $hdr);
        $DirCnt++;
    } elsif ( $hdr->{type} == BPC_FTYPE_FILE ) {
        #
        # Regular file: write the header and file
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        if ( !defined($f) ) {
            print(STDERR "Unable to open file $hdr->{fullPath}\n");
            $ErrorCnt++;
            return;
        }
        # do we need to split the file into multiple parts?
        if ($hdr->{size} < $max_file_size) {
            TarWriteFileInfo($fh, $hdr);
            my($data, $size);
            while ( $f->read(\$data, $BufSize) > 0 ) {
                TarWrite($fh, \$data);
                $size += length($data);
            }
            $f->close;
            TarWritePad($fh, $size);
            $FileCnt++;
            $ByteCnt += $size;
        } else {
            my $full_size = $hdr->{size};
            my $orig_name = $hdr->{name};
            my $max_part_size = $max_file_size - tar_overhead($hdr->{name});

            my $parts = int(($full_size + $max_part_size - 1) / $max_part_size);
            print STDERR "# splitting $orig_name [$full_size bytes] into $parts parts\n" if ($opts{d});
            foreach my $subpart ( 1 .. $parts ) {
                new_tar_part();
                if ($subpart < $parts) {
                    $hdr->{size} = $max_part_size;
                } else {
                    # last part gets whatever is left over
                    $hdr->{size} = $full_size - $max_part_size * ($parts - 1);
                }
                $hdr->{name} = $orig_name . '/' . $subpart;
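                # each part is stored under name/1, name/2, ... so the
                # original file can be reassembled on restore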
                print STDERR "## creating part $subpart ", $hdr->{name}, " [", $hdr->{size}, " bytes]\n" if ($opts{d});

                TarWriteFileInfo($fh, $hdr);
                my($data, $size);
                for ( 1 .. int($hdr->{size} / $BufSize) ) {
                    my $r_size = $f->read(\$data, $BufSize);
                    die "expected $BufSize bytes read, got $r_size bytes!" if ($r_size != $BufSize);
                    TarWrite($fh, \$data);
                    $size += length($data);
                }
                my $size_left = $hdr->{size} % $BufSize;
                if ($size_left > 0) {
                    my $r_size = $f->read(\$data, $size_left);
                    die "expected $size_left bytes last read, got $r_size bytes!" if ($r_size != $size_left);
                    TarWrite($fh, \$data);
                    $size += length($data);
                }
                TarWritePad($fh, $size);

                $items_in_part++;
            }
            $f->close;
            $FileCnt++;
            $ByteCnt += $full_size;
            new_tar_part();
        }
} elsif ( $hdr->{type} == BPC_FTYPE_HARDLINK ) { |
} elsif ( $hdr->{type} == BPC_FTYPE_HARDLINK ) { |
751 |
# |
# |
752 |
# Hardlink file: either write a hardlink or the complete file |
# Hardlink file: either write a hardlink or the complete file |
753 |
# depending upon whether the linked-to file will be written |
# depending upon whether the linked-to file will be written |
754 |
# to the archive. |
# to the archive. |
755 |
# |
# |
756 |
# Start by reading the contents of the link. |
# Start by reading the contents of the link. |
757 |
# |
# |
758 |
my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress}); |
my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress}); |
759 |
if ( !defined($f) ) { |
if ( !defined($f) ) { |
760 |
print(STDERR "Unable to open file $hdr->{fullPath}\n"); |
print(STDERR "Unable to open file $hdr->{fullPath}\n"); |
765 |
while ( $f->read(\$data, $BufSize) > 0 ) { |
while ( $f->read(\$data, $BufSize) > 0 ) { |
766 |
$hdr->{linkname} .= $data; |
$hdr->{linkname} .= $data; |
767 |
} |
} |
768 |
$f->close; |
$f->close; |
769 |
# |
my $done = 0; |
770 |
# Check @ARGV and the list of hardlinked files we have explicity |
my $name = $hdr->{linkname}; |
771 |
# dumped to see if we have dumped this file or not |
$name =~ s{^\./}{/}; |
772 |
# |
if ( $HardLinkExtraFiles{$name} ) { |
773 |
my $done = 0; |
# |
774 |
my $name = $hdr->{linkname}; |
# Target file will be or was written, so just remember |
775 |
$name =~ s{^\./}{/}; |
# the hardlink so we can dump it later. |
776 |
if ( $HardLinkExtraFiles{$name} ) { |
# |
777 |
$done = 1; |
push(@HardLinks, $hdr); |
778 |
} else { |
$SpecialCnt++; |
779 |
foreach my $arg ( @ARGV ) { |
} else { |
780 |
$arg =~ s{^\./+}{/}; |
# |
781 |
$arg =~ s{/+$}{}; |
# Have to dump the original file. Just call the top-level |
782 |
$done = 1 if ( $name eq $arg || $name =~ /^\Q$arg\// ); |
# routine, so that we save the hassle of dealing with |
783 |
} |
# mangling, merging and attributes. |
784 |
} |
# |
785 |
if ( $done ) { |
$HardLinkExtraFiles{$hdr->{linkname}} = 1; |
786 |
# |
archiveWrite($fh, $hdr->{linkname}, $hdr->{name}); |
787 |
# Target file will be or was written, so just remember |
} |
|
# the hardlink so we can dump it later. |
|
|
# |
|
|
push(@HardLinks, $hdr); |
|
|
$SpecialCnt++; |
|
|
} else { |
|
|
# |
|
|
# Have to dump the original file. Just call the top-level |
|
|
# routine, so that we save the hassle of dealing with |
|
|
# mangling, merging and attributes. |
|
|
# |
|
|
$HardLinkExtraFiles{$hdr->{linkname}} = 1; |
|
|
archiveWrite($fh, $hdr->{linkname}, $hdr->{name}); |
|
|
} |
|
    } elsif ( $hdr->{type} == BPC_FTYPE_SYMLINK ) {
        #
        # Symbolic link: read the symbolic link contents into the header
        # and write the header.
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        if ( !defined($f) ) {
            print(STDERR "Unable to open symlink file $hdr->{fullPath}\n");
            $ErrorCnt++;
            return;
        }
        my $data;
        while ( $f->read(\$data, $BufSize) > 0 ) {
            $hdr->{linkname} .= $data;
        }
        $f->close;
        $hdr->{size} = 0;
        TarWriteFileInfo($fh, $hdr);
        $SpecialCnt++;
    } elsif ( $hdr->{type} == BPC_FTYPE_CHARDEV
           || $hdr->{type} == BPC_FTYPE_BLOCKDEV
           || $hdr->{type} == BPC_FTYPE_FIFO ) {
        #
        # Special files: for char and block special files the major and
        # minor numbers are stored in the file as "major,minor".
        #
        my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
        my $data;
        if ( defined($f) ) {
            $f->read(\$data, $BufSize);
            $f->close;
        } else {
            print(STDERR "Unable to open special file $hdr->{fullPath}\n");
            $ErrorCnt++;
        }
        if ( defined($data) && $data =~ /(\d+),(\d+)/ ) {
            $hdr->{devmajor} = $1;
            $hdr->{devminor} = $2;
        }
        $hdr->{size} = 0;
        TarWriteFileInfo($fh, $hdr);
        $SpecialCnt++;
    } else {
        print(STDERR "Got unknown type $hdr->{type} for $hdr->{name}\n");
        $ErrorCnt++;
    }
}

my $t_fmt = '%Y-%m-%d %H:%M:%S';
sub curr_time {
    return strftime($t_fmt, localtime());
}