74 |
use POSIX qw/strftime/; |
use POSIX qw/strftime/; |
75 |
use File::Which; |
use File::Which; |
76 |
use File::Path; |
use File::Path; |
77 |
|
use File::Slurp; |
78 |
use Data::Dumper; ### FIXME |
use Data::Dumper; ### FIXME |
79 |
|
|
80 |
die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) ); |
die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) ); |
86 |
my $in_backup_increment; |
my $in_backup_increment; |
87 |
|
|
88 |
|
|
89 |
if ( !getopts("th:n:p:r:s:b:w:vd", \%opts) ) { |
if ( !getopts("th:n:p:r:s:b:w:vdf", \%opts) ) { |
90 |
print STDERR <<EOF; |
print STDERR <<EOF; |
91 |
usage: $0 [options] |
usage: $0 [options] |
92 |
Required options: |
Required options: |
102 |
-p pathAdd new path prefix |
-p pathAdd new path prefix |
103 |
-b BLOCKS BLOCKS x 512 bytes per record (default 20; same as tar) |
-b BLOCKS BLOCKS x 512 bytes per record (default 20; same as tar) |
104 |
-w writeBufSz write buffer size (default 1048576 = 1MB) |
-w writeBufSz write buffer size (default 1048576 = 1MB) |
105 |
|
-f overwrite existing parts |
106 |
-v verbose output |
-v verbose output |
107 |
-d debug output |
-d debug output |
108 |
EOF |
EOF |
110 |
} |
} |
111 |
|
|
112 |
if ( $opts{h} !~ /^([\w\.\s-]+)$/ ) { |
if ( $opts{h} !~ /^([\w\.\s-]+)$/ ) { |
113 |
print(STDERR "$0: bad host name '$opts{h}'\n"); |
die "$0: bad host name '$opts{h}'\n"; |
|
exit(1); |
|
114 |
} |
} |
115 |
my $Host = $opts{h}; |
my $Host = $opts{h}; |
116 |
|
|
117 |
if ( $opts{n} !~ /^(-?\d+)$/ ) { |
if ( $opts{n} !~ /^(-?\d+)$/ ) { |
118 |
print(STDERR "$0: bad dump number '$opts{n}'\n"); |
die "$0: bad dump number '$opts{n}'\n"; |
|
exit(1); |
|
119 |
} |
} |
120 |
my $Num = $opts{n}; |
my $Num = $opts{n}; |
121 |
|
|
131 |
my $SpecialCnt = 0; |
my $SpecialCnt = 0; |
132 |
my $ErrorCnt = 0; |
my $ErrorCnt = 0; |
133 |
my $current_tar_size = 0; |
my $current_tar_size = 0; |
134 |
|
my $total_increment_size = 0; |
135 |
|
|
136 |
my $i; |
my $i; |
137 |
$Num = $Backups[@Backups + $Num]{num} if ( -@Backups <= $Num && $Num < 0 ); |
$Num = $Backups[@Backups + $Num]{num} if ( -@Backups <= $Num && $Num < 0 ); |
139 |
last if ( $Backups[$i]{num} == $Num ); |
last if ( $Backups[$i]{num} == $Num ); |
140 |
} |
} |
141 |
if ( $i >= @Backups ) { |
if ( $i >= @Backups ) { |
142 |
print(STDERR "$0: bad backup number $Num for host $Host\n"); |
die "$0: bad backup number $Num for host $Host\n"; |
|
exit(1); |
|
143 |
} |
} |
144 |
|
|
145 |
my $PathRemove = $1 if ( $opts{r} =~ /(.+)/ ); |
my $PathRemove = $1 if ( $opts{r} =~ /(.+)/ ); |
146 |
my $PathAdd = $1 if ( $opts{p} =~ /(.+)/ ); |
my $PathAdd = $1 if ( $opts{p} =~ /(.+)/ ); |
147 |
if ( $opts{s} !~ /^([\w\s\.\/\$-]+)$/ && $opts{s} ne "*" ) { |
if ( $opts{s} !~ /^([\w\s\.\/\$-]+)$/ && $opts{s} ne "*" ) { |
148 |
print(STDERR "$0: bad share name '$opts{s}'\n"); |
die "$0: bad share name '$opts{s}'\n"; |
|
exit(1); |
|
149 |
} |
} |
150 |
our $ShareName = $opts{s}; |
our $ShareName = $opts{s}; |
151 |
our $view = BackupPC::View->new($bpc, $Host, \@Backups); |
our $view = BackupPC::View->new($bpc, $Host, \@Backups); |
152 |
|
|
153 |
|
# database |
154 |
|
|
155 |
|
my $dsn = $Conf{SearchDSN}; |
156 |
|
my $db_user = $Conf{SearchUser} || ''; |
157 |
|
|
158 |
|
my $dbh = DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 0} ); |
159 |
|
|
160 |
|
my $sth_inc_size = $dbh->prepare(qq{ |
161 |
|
update backups set |
162 |
|
inc_size = ?, |
163 |
|
parts = ?, |
164 |
|
inc_deleted = false |
165 |
|
where id = ? |
166 |
|
}); |
167 |
|
my $sth_backup_parts = $dbh->prepare(qq{ |
168 |
|
insert into backup_parts ( |
169 |
|
backup_id, |
170 |
|
part_nr, |
171 |
|
tar_size, |
172 |
|
size, |
173 |
|
md5, |
174 |
|
items |
175 |
|
) values (?,?,?,?,?,?) |
176 |
|
}); |
177 |
|
|
178 |
# |
# |
179 |
# This constant and the line of code below that uses it are borrowed |
# This constant and the line of code below that uses it are borrowed |
180 |
# from Archive::Tar. Thanks to Calle Dybedahl and Stephen Zander. |
# from Archive::Tar. Thanks to Calle Dybedahl and Stephen Zander. |
199 |
# |
# |
200 |
|
|
201 |
my $max_file_size = $Conf{'MaxArchiveFileSize'} || die "problem with MaxArchiveFileSize parameter"; |
my $max_file_size = $Conf{'MaxArchiveFileSize'} || die "problem with MaxArchiveFileSize parameter"; |
|
$max_file_size *= 1024; |
|
202 |
|
|
203 |
my $tar_dir = $Conf{InstallDir}.'/'.$Conf{GzipTempDir}; |
my $tar_dir = $Conf{InstallDir}.'/'.$Conf{GzipTempDir}; |
204 |
die "problem with $tar_dir, check GzipTempDir in configuration\n" unless (-d $tar_dir && -w $tar_dir); |
die "problem with $tar_dir, check GzipTempDir in configuration\n" unless (-d $tar_dir && -w $tar_dir); |
205 |
|
|
206 |
my $tar_file = BackupPC::SearchLib::getGzipName($Host, $ShareName, $Num) || die "can't getGzipName($Host, $ShareName, $Num)"; |
my $tar_file = BackupPC::SearchLib::getGzipName($Host, $ShareName, $Num) || die "can't getGzipName($Host, $ShareName, $Num)"; |
207 |
|
|
208 |
my $tar_path = $tar_dir . '/' . $tar_file . '.tmp'; |
my $tar_path_final = $tar_dir . '/' . $tar_file; |
209 |
|
my $tar_path = $tar_path_final . '.tmp'; |
210 |
|
|
211 |
$tar_path =~ s#//#/#g; |
$tar_path =~ s#//#/#g; |
212 |
|
|
213 |
print STDERR "working dir: $tar_dir, max uncompressed size $max_file_size bytes, tar $tar_file\n" if ($opts{d}); |
my $sth = $dbh->prepare(qq{ |
214 |
|
SELECT |
215 |
|
backups.id |
216 |
|
FROM backups |
217 |
|
JOIN shares on shares.id = shareid |
218 |
|
JOIN hosts on hosts.id = shares.hostid |
219 |
|
WHERE hosts.name = ? and shares.name = ? and backups.num = ? |
220 |
|
}); |
221 |
|
$sth->execute($Host, $ShareName, $Num); |
222 |
|
my ($backup_id) = $sth->fetchrow_array; |
223 |
|
$sth->finish; |
224 |
|
|
225 |
|
|
226 |
|
# delete existing backup_parts |
227 |
|
my $sth_delete_backup_parts = $dbh->prepare(qq{ |
228 |
|
delete from backup_parts |
229 |
|
where backup_id = ? |
230 |
|
}); |
231 |
|
$sth_delete_backup_parts->execute($backup_id); |
232 |
|
|
233 |
|
|
234 |
|
print STDERR "backup_id: $backup_id working dir: $tar_dir, max uncompressed size $max_file_size bytes, tar $tar_file\n" if ($opts{d}); |
235 |
|
|
236 |
|
if (-e $tar_path_final) { |
237 |
|
if ($opts{f}) { |
238 |
|
rmtree $tar_path_final || die "can't remove $tar_path_final: $!"; |
239 |
|
} else { |
240 |
|
die "$tar_path_final already exists\n"; |
241 |
|
} |
242 |
|
} |
243 |
|
|
244 |
my $fh; |
my $fh; |
245 |
my $part = 0; |
my $part = 0; |
246 |
my $no_files = 0; |
my $no_files = 0; |
247 |
|
my $items_in_part = 0; |
248 |
|
|
249 |
sub new_tar_part { |
sub new_tar_part { |
250 |
|
my $arg = {@_}; |
251 |
|
|
252 |
if ($fh) { |
if ($fh) { |
253 |
return if ($current_tar_size == 0); |
return if ($current_tar_size == 0); |
254 |
|
|
255 |
print STDERR "# closing part $part\n" if ($opts{d}); |
print STDERR " $part"; |
256 |
|
|
257 |
# finish tar archive |
# |
258 |
|
# Finish with two null 512 byte headers, |
259 |
|
# and then round out a full block. |
260 |
|
# |
261 |
my $data = "\0" x ($tar_header_length * 2); |
my $data = "\0" x ($tar_header_length * 2); |
262 |
TarWrite($fh, \$data); |
TarWrite($fh, \$data); |
263 |
TarWrite($fh, undef); |
TarWrite($fh, undef); |
264 |
|
|
265 |
close($fh) || die "can't close archive part $part: $!"; |
close($fh) || die "can't close archive part $part: $!"; |
266 |
|
|
267 |
|
my $file = $tar_path . '/' . $part; |
268 |
|
|
269 |
|
my $md5 = read_file( $file . '.md5' ) || die "can't read md5sum file ${file}.md5"; |
270 |
|
$md5 =~ s/\s.*$//; |
271 |
|
|
272 |
|
my $size = (stat( $file . '.tar.gz' ))[7] || die "can't stat ${file}.tar.gz"; |
273 |
|
|
274 |
|
$sth_backup_parts->execute( |
275 |
|
$backup_id, |
276 |
|
$part, |
277 |
|
$current_tar_size, |
278 |
|
$size, |
279 |
|
$md5, |
280 |
|
$items_in_part, |
281 |
|
); |
282 |
|
|
283 |
|
$total_increment_size += $size; |
284 |
|
|
285 |
|
if ($arg->{close}) { |
286 |
|
|
287 |
|
sub move($$) { |
288 |
|
my ($from,$to) = @_; |
289 |
|
print STDERR "# rename $from -> $to\n" if ($opts{d}); |
290 |
|
rename $from, $to || die "can't move $from -> $to: $!\n"; |
291 |
|
} |
292 |
|
|
293 |
|
if ($part == 1) { |
294 |
|
print STDERR " single" if ($opts{v}); |
295 |
|
move("${tar_path}/1.tar.gz", "${tar_path_final}.tar.gz"); |
296 |
|
move("${tar_path}/1.md5", "${tar_path_final}.md5"); |
297 |
|
rmtree $tar_path or die "can't remove temporary dir $tar_path: $!"; |
298 |
|
} else { |
299 |
|
print STDERR " [last]" if ($opts{v}); |
300 |
|
move("${tar_path}", "${tar_path_final}"); |
301 |
|
|
302 |
|
# remove leftover files from a previous single-part archive run |
303 |
|
foreach my $suffix (qw/.tar.gz .md5/) { |
304 |
|
my $path = $tar_path_final . $suffix; |
305 |
|
unlink $path if (-e $path); |
306 |
|
} |
307 |
|
} |
308 |
|
|
309 |
|
$sth_inc_size->execute( |
310 |
|
$total_increment_size, |
311 |
|
$part, |
312 |
|
$backup_id |
313 |
|
); |
314 |
|
|
315 |
|
print STDERR ", $total_increment_size bytes\n" if ($opts{v}); |
316 |
|
|
317 |
|
return; |
318 |
|
} |
319 |
|
|
320 |
} |
} |
321 |
|
|
322 |
$part++; |
$part++; |
324 |
# if this is first part, create directory |
# if this is first part, create directory |
325 |
|
|
326 |
if ($part == 1) { |
if ($part == 1) { |
327 |
if (-d $tar_path) { |
if (-e $tar_path) { |
328 |
print STDERR "# deleting existing $tar_path\n" if ($opts{d}); |
print STDERR "# deleting existing $tar_path\n" if ($opts{d}); |
329 |
rmtree($tar_path); |
rmtree($tar_path); |
330 |
} |
} |
331 |
mkdir($tar_path) || die "can't create directory $tar_path: $!"; |
mkdir($tar_path) || die "can't create directory $tar_path: $!"; |
332 |
|
|
333 |
|
sub abort_cleanup { |
334 |
|
print STDERR "ABORTED: cleanup temp dir"; |
335 |
|
rmtree($tar_path); |
336 |
|
$dbh->rollback; |
337 |
|
exit 1; |
338 |
|
} |
339 |
|
|
340 |
|
$SIG{'INT'} = \&abort_cleanup; |
341 |
|
$SIG{'QUIT'} = \&abort_cleanup; |
342 |
|
$SIG{'__DIE__'} = \&abort_cleanup; |
343 |
|
|
344 |
} |
} |
345 |
|
|
346 |
my $file = $tar_path . '/' . $part; |
my $file = $tar_path . '/' . $part; |
359 |
|
|
360 |
open($fh, $cmd) or die "can't open $cmd: $!"; |
open($fh, $cmd) or die "can't open $cmd: $!"; |
361 |
binmode($fh); |
binmode($fh); |
362 |
|
|
363 |
$current_tar_size = 0; |
$current_tar_size = 0; |
364 |
|
$items_in_part = 0; |
365 |
} |
} |
366 |
|
|
367 |
new_tar_part(); |
new_tar_part(); |
369 |
if (seedCache($Host, $ShareName, $Num)) { |
if (seedCache($Host, $ShareName, $Num)) { |
370 |
archiveWrite($fh, '/'); |
archiveWrite($fh, '/'); |
371 |
archiveWriteHardLinks($fh); |
archiveWriteHardLinks($fh); |
372 |
|
new_tar_part( close => 1 ); |
373 |
} else { |
} else { |
374 |
print STDERR "NOTE: no files found for $Host:$ShareName, increment $Num\n" if ($opts{v}); |
print STDERR "NOTE: no files found for $Host:$ShareName, increment $Num\n" if ($opts{v}); |
375 |
$no_files = 1; |
# remove temporary files if there are no files |
|
} |
|
|
|
|
|
# |
|
|
# Finish with two null 512 byte headers, and then round out a full |
|
|
# block. |
|
|
# |
|
|
my $data = "\0" x ($tar_header_length * 2); |
|
|
TarWrite($fh, \$data); |
|
|
TarWrite($fh, undef); |
|
|
|
|
|
if (! close($fh)) { |
|
|
rmtree($tar_path); |
|
|
die "can't close archive\n"; |
|
|
} |
|
|
|
|
|
# remove temporary files if there are no files |
|
|
if ($no_files) { |
|
376 |
rmtree($tar_path); |
rmtree($tar_path); |
|
} elsif ($part == 1) { |
|
|
warn "FIXME: if there is only one part move to parent directory and rename"; |
|
377 |
} |
} |
378 |
|
|
379 |
# |
# |
388 |
# Got errors, with no files or directories; exit with non-zero |
# Got errors, with no files or directories; exit with non-zero |
389 |
# status |
# status |
390 |
# |
# |
391 |
cleanup(); |
die "got errors or no files\n"; |
|
exit(1); |
|
392 |
} |
} |
393 |
|
|
394 |
exit(0); |
$sth_inc_size->finish; |
395 |
|
$sth_backup_parts->finish; |
396 |
|
|
397 |
|
$dbh->commit || die "can't commit changes to database"; |
398 |
|
$dbh->disconnect(); |
399 |
|
|
400 |
|
exit; |
401 |
|
|
402 |
########################################################################### |
########################################################################### |
403 |
# Subroutines |
# Subroutines |
484 |
my $done = $WriteBufSz - length($WriteBuf); |
my $done = $WriteBufSz - length($WriteBuf); |
485 |
if ( syswrite($fh, $WriteBuf . substr($$dataRef, 0, $done)) |
if ( syswrite($fh, $WriteBuf . substr($$dataRef, 0, $done)) |
486 |
!= $WriteBufSz ) { |
!= $WriteBufSz ) { |
487 |
print(STDERR "Unable to write to output file ($!)\n"); |
die "Unable to write to output file ($!)\n"; |
|
exit(1); |
|
488 |
} |
} |
489 |
while ( $done + $WriteBufSz <= length($$dataRef) ) { |
while ( $done + $WriteBufSz <= length($$dataRef) ) { |
490 |
if ( syswrite($fh, substr($$dataRef, $done, $WriteBufSz)) |
if ( syswrite($fh, substr($$dataRef, $done, $WriteBufSz)) |
491 |
!= $WriteBufSz ) { |
!= $WriteBufSz ) { |
492 |
print(STDERR "Unable to write to output file ($!)\n"); |
die "Unable to write to output file ($!)\n"; |
|
exit(1); |
|
493 |
} |
} |
494 |
$done += $WriteBufSz; |
$done += $WriteBufSz; |
495 |
} |
} |
594 |
sub seedCache($$$) { |
sub seedCache($$$) { |
595 |
my ($host, $share, $dumpNo) = @_; |
my ($host, $share, $dumpNo) = @_; |
596 |
|
|
597 |
my $dsn = $Conf{SearchDSN}; |
print STDERR curr_time(), "$host:$share #$dumpNo" if ($opts{v}); |
|
my $db_user = $Conf{SearchUser} || ''; |
|
|
|
|
|
print STDERR curr_time(), "getting files for $host:$share increment $dumpNo..." if ($opts{v}); |
|
598 |
my $sql = q{ |
my $sql = q{ |
599 |
SELECT path,size |
SELECT path,size |
600 |
FROM files |
FROM files |
603 |
WHERE hosts.name = ? and shares.name = ? and backupnum = ? |
WHERE hosts.name = ? and shares.name = ? and backupnum = ? |
604 |
}; |
}; |
605 |
|
|
|
my $dbh = DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 1} ); |
|
606 |
my $sth = $dbh->prepare($sql); |
my $sth = $dbh->prepare($sql); |
607 |
$sth->execute($host, $share, $dumpNo); |
$sth->execute($host, $share, $dumpNo); |
608 |
my $count = $sth->rows; |
my $count = $sth->rows; |
609 |
print STDERR " found $count items\n" if ($opts{v}); |
print STDERR " $count items, parts:" if ($opts{v}); |
610 |
while (my $row = $sth->fetchrow_arrayref) { |
while (my $row = $sth->fetchrow_arrayref) { |
611 |
#print STDERR "+ ", $row->[0],"\n"; |
#print STDERR "+ ", $row->[0],"\n"; |
612 |
$in_backup_increment->{ $row->[0] } = $row->[1]; |
$in_backup_increment->{ $row->[0] } = $row->[1]; |
613 |
} |
} |
614 |
|
|
615 |
$sth->finish(); |
$sth->finish(); |
|
$dbh->disconnect(); |
|
616 |
|
|
617 |
return $count; |
return $count; |
618 |
} |
} |
654 |
# is this file too large to fit into MaxArchiveFileSize? |
# is this file too large to fit into MaxArchiveFileSize? |
655 |
|
|
656 |
if ( ($current_tar_size + tar_overhead($tarPath) + $size) > $max_file_size ) { |
if ( ($current_tar_size + tar_overhead($tarPath) + $size) > $max_file_size ) { |
657 |
print STDERR "# tar file $current_tar_size + $tar_header_length + $size > $max_file_size, splitting\n" if ($opts{d}); |
print STDERR "# tar file $current_tar_size + $tar_header_length + $size > $max_file_size, splitting\n" if ($opts{d}); |
658 |
new_tar_part(); |
new_tar_part(); |
659 |
} |
} |
660 |
|
|
661 |
print STDERR "A $tarPath [$size] tell: $current_tar_size\n" if ($opts{d}); |
#print STDERR "A $tarPath [$size] tell: $current_tar_size\n" if ($opts{d}); |
662 |
|
$items_in_part++; |
663 |
|
|
664 |
if ( defined($PathRemove) |
if ( defined($PathRemove) |
665 |
&& substr($tarPath, 0, length($PathRemove)) eq $PathRemove ) { |
&& substr($tarPath, 0, length($PathRemove)) eq $PathRemove ) { |
732 |
TarWrite($fh, \$data); |
TarWrite($fh, \$data); |
733 |
$size += length($data); |
$size += length($data); |
734 |
TarWritePad($fh, $size); |
TarWritePad($fh, $size); |
735 |
|
|
736 |
|
$items_in_part++; |
737 |
} |
} |
738 |
$f->close; |
$f->close; |
739 |
$FileCnt++; |
$FileCnt++; |