16 |
use Algorithm::Diff; |
use Algorithm::Diff; |
17 |
use Getopt::Std; |
use Getopt::Std; |
18 |
use File::Slurp; |
use File::Slurp; |
19 |
|
use File::Pid; |
20 |
|
|
21 |
|
=head1 NAME

BackupPC_incPartsUpdate

=head1 DESCRIPTION

Create C<.tar.gz> increments on disk calling C<BackupPC_tarIncCreate>.

The following options are supported (all are optional):

=over 4

=item -h hostname

Update parts for just a single C<hostname>

=item -c

Force a check of tar archives which exist on disk

=item -d

Turn on debugging output

=back

=cut
48 |
|
|
49 |
|
# Parse command-line options:
#   -c          force a check of tar archives which exist on disk
#   -d          turn on debugging output
#   -h hostname update parts for just a single hostname
my %opt;
getopts("cdh:", \%opt);

my $debug = $opt{d};
# The notice is printed as a side effect of enabling the check flag;
# print returns true, so $check stays true when -c is given.
my $check = $opt{c} && print STDERR "NOTICE: tar archive check forced\n";

# Derive a pidfile name from the absolute path of this script so that
# concurrent runs of the same installation exclude each other, while
# different installations get distinct pidfiles.
my $pid_path = abs_path($0);
$pid_path =~ s/\W+/_/g;

# Direct method call instead of indirect object syntax ("new File::Pid"),
# which is ambiguous to the parser and deprecated practice.
my $pidfile = File::Pid->new({
    file => "/tmp/$pid_path",
});

# Refuse to run while another instance holds the pidfile; remove a stale
# pidfile left behind by a process that is no longer running.
if ( my $pid = $pidfile->running ) {
    die "$0 already running: $pid\n";
} elsif ( $pidfile->pid ne $$ ) {
    $pidfile->remove;
    $pidfile = File::Pid->new;
}

print STDERR "$0 using pid ", $pidfile->pid, " file ", $pidfile->file, "\n";
$pidfile->write;
71 |
|
|
72 |
my $bpc = BackupPC::Lib->new || die "can't create BackupPC::Lib"; |
my $bpc = BackupPC::Lib->new || die "can't create BackupPC::Lib"; |
73 |
my %Conf = $bpc->Conf(); |
my %Conf = $bpc->Conf(); |
86 |
$bin->{$c} = which($c) || die "$0 needs $c, install it\n"; |
$bin->{$c} = which($c) || die "$0 needs $c, install it\n"; |
87 |
} |
} |
88 |
|
|
|
my %opt; |
|
|
getopts("cd", \%opt ); |
|
|
|
|
|
my $debug = $opt{d}; |
|
|
my $check = $opt{c} && print STDERR "NOTICE: tar archive check forced\n"; |
|
|
|
|
89 |
$|=1; |
$|=1; |
90 |
|
|
91 |
my $start_t = time(); |
my $start_t = time(); |
130 |
FROM backups |
FROM backups |
131 |
INNER JOIN shares ON backups.shareID=shares.ID |
INNER JOIN shares ON backups.shareID=shares.ID |
132 |
INNER JOIN hosts ON backups.hostID = hosts.ID |
INNER JOIN hosts ON backups.hostID = hosts.ID |
133 |
where hosts.name = ? and shares.name = ? and backups.num = ? |
WHERE hosts.name = ? and shares.name = ? and backups.num = ? |
134 |
}); |
}); |
135 |
$sth->execute($host, $share, $num); |
$sth->execute($host, $share, $num); |
136 |
my ($id) = $sth->fetchrow_array; |
my ($id) = $sth->fetchrow_array; |
149 |
my $t = time(); |
my $t = time(); |
150 |
print curr_time, " check $host:$share#$num -> $filename"; |
print curr_time, " check $host:$share#$num -> $filename"; |
151 |
|
|
152 |
|
# Return the size(s) recorded in a gzip archive, as reported by C<gzip -l>.
#
# Depending on calling context this is used like:
#   my $uncompress_size = get_gzip_size('/full/path/to.gz');
#   my ($compress_size, $uncompress_size) = get_gzip_size('/path.gz');
#
# Dies when the file is unreadable, gzip cannot be started, or the
# C<gzip -l> output cannot be parsed.
sub get_gzip_size {
    my $filename = shift;

    die "file $filename problem: $!" unless (-r $filename);

    # List-form pipe open: arguments are passed to gzip directly, so a
    # filename containing shell metacharacters cannot be misinterpreted.
    open(my $gzip, '-|', $bin->{gzip}, '-l', $filename)
        || die "can't gzip -l $filename: $!";

    my $line = <$gzip>;
    chomp($line);
    # Skip the header line ("compressed  uncompressed  ratio ...") when present.
    $line = <$gzip> if ($line =~ /^\s+compressed/);
    close($gzip);

    if ($line =~ m/^\s+(\d+)\s+(\d+)\s+\d+\.\d+/) {
        if (wantarray) {
            # Return a real two-element list, not an arrayref: the documented
            # usage "my ($c, $u) = get_gzip_size(...)" would otherwise get the
            # arrayref in $c and undef in $u.
            return ($1, $2);
        } else {
            return $2;
        }
    } else {
        die "can't find size in line: $line";
    }
}
175 |
|
|
176 |
sub check_part { |
sub check_part { |
177 |
my ($host, $share, $num, $part_nr, $tar_size, $size, $md5, $items) = @_; |
my ($host, $share, $num, $part_nr, $tar_size, $size, $md5, $items) = @_; |
178 |
my $backup_id = get_backup_id($host, $share, $num); |
my $backup_id = get_backup_id($host, $share, $num); |
235 |
|
|
236 |
print "\n\t- $tarfilename"; |
print "\n\t- $tarfilename"; |
237 |
|
|
238 |
my $size = (stat( "$tar_dir/$tarfilename" ))[7] || die "can't stat $tar_dir/$tarfilename"; |
my $path = "$tar_dir/$tarfilename"; |
239 |
|
|
240 |
|
my $size = (stat( $path ))[7] || die "can't stat $path: $!"; |
241 |
|
|
242 |
if ($size > $Conf{MaxArchiveSize}) { |
if ($size > $Conf{MaxArchiveSize}) { |
243 |
print ", part bigger than media $size > $Conf{MaxArchiveSize}\n"; |
print ", part bigger than media $size > $Conf{MaxArchiveSize}\n"; |
246 |
|
|
247 |
print ", $size bytes"; |
print ", $size bytes"; |
248 |
|
|
|
my $path = "$tar_dir/$tarfilename"; |
|
249 |
|
|
250 |
open(my $fh, "gzip -cd $tar_dir/$tarfilename |") or die "can't open $tar_dir/$tarfilename: $!"; |
open(my $fh, "gzip -cd $path |") or die "can't open $path: $!"; |
251 |
binmode($fh); |
binmode($fh); |
252 |
my $tar = Archive::Tar::Streamed->new($fh); |
my $tar = Archive::Tar::Streamed->new($fh); |
253 |
|
|
254 |
my $tar_size = 0; |
my $tar_size_inarc = 0; |
255 |
my $items = 0; |
my $items = 0; |
256 |
|
|
257 |
while(my $entry = $tar->next) { |
while(my $entry = $tar->next) { |
258 |
push @tar_files, $entry->name; |
push @tar_files, $entry->name; |
259 |
$items++; |
$items++; |
260 |
$tar_size += $entry->size; |
$tar_size_inarc += $entry->size; |
261 |
|
|
262 |
if ($tar_size > $Conf{MaxArchiveFileSize}) { |
if ($tar_size_inarc > $Conf{MaxArchiveFileSize}) { |
263 |
print ", part $tarfilename is too big $tar_size > $Conf{MaxArchiveFileSize}\n"; |
print ", part $tarfilename is too big $tar_size_inarc > $Conf{MaxArchiveFileSize}\n"; |
264 |
return 0; |
return 0; |
265 |
} |
} |
266 |
|
|
267 |
} |
} |
268 |
|
|
269 |
|
close($fh); |
270 |
|
|
271 |
print ", $items items"; |
print ", $items items"; |
272 |
|
|
273 |
if ($tar_size == 0) { |
if ($tar_size_inarc == 0 && $items == 0) { |
274 |
print ", EMPTY tar\n"; |
print ", EMPTY tar\n"; |
275 |
|
|
276 |
my $backup_id = get_backup_id($host, $share, $num); |
my $backup_id = get_backup_id($host, $share, $num); |
281 |
where id = ? |
where id = ? |
282 |
}); |
}); |
283 |
$sth_inc_deleted->execute($backup_id); |
$sth_inc_deleted->execute($backup_id); |
284 |
|
|
285 |
|
$dbh->commit; |
286 |
|
|
287 |
|
return 1; |
288 |
} |
} |
289 |
|
|
290 |
|
my $tar_size = get_gzip_size( $path ); |
291 |
|
|
292 |
|
# real tar size is bigger because of padding |
293 |
|
if ($tar_size_inarc > $tar_size) { |
294 |
|
print ", size of files in tar ($tar_size_inarc) bigger than whole tar ($tar_size)!\n"; |
295 |
|
return 0; |
296 |
|
} |
297 |
|
|
298 |
# |
# |
299 |
# check if md5 exists, and if not, create one |
# check if md5 exists, and if not, create one |
378 |
hosts.name as host, |
hosts.name as host, |
379 |
shares.name as share, |
shares.name as share, |
380 |
backups.num as num, |
backups.num as num, |
381 |
|
backups.date, |
382 |
inc_size, |
inc_size, |
383 |
parts |
parts, |
384 |
|
count(backup_parts.backup_id) as backup_parts |
385 |
from backups |
from backups |
386 |
join shares on backups.hostid = shares.hostid |
join shares on backups.hostid = shares.hostid |
387 |
and shares.id = backups.shareid |
and shares.id = backups.shareid |
388 |
join hosts on shares.hostid = hosts.id |
join hosts on shares.hostid = hosts.id |
389 |
where not inc_deleted |
full outer join backup_parts on backups.id = backup_parts.backup_id |
390 |
|
where not inc_deleted and backups.size > 0 |
391 |
|
group by backups.id, hosts.name, shares.name, backups.num, backups.date, inc_size, parts, backup_parts.backup_id |
392 |
order by backups.date |
order by backups.date |
393 |
|
|
394 |
} ); |
} ); |
397 |
my $num_backups = $sth->rows; |
my $num_backups = $sth->rows; |
398 |
my $curr_backup = 1; |
my $curr_backup = 1; |
399 |
|
|
400 |
|
if ($opt{h}) { |
401 |
|
warn "making increments just for host $opt{h}\n"; |
402 |
|
} |
403 |
|
|
404 |
while (my $row = $sth->fetchrow_hashref) { |
while (my $row = $sth->fetchrow_hashref) { |
405 |
|
|
406 |
|
if ($opt{h} && $row->{host} ne $opt{h}) { |
407 |
|
warn "skipped $row->{host}\n" if ($debug); |
408 |
|
next; |
409 |
|
} |
410 |
|
|
411 |
$curr_backup++; |
$curr_backup++; |
412 |
|
|
413 |
my $tar_file = BackupPC::SearchLib::getGzipName($row->{'host'}, $row->{'share'}, $row->{'num'}); |
my $tar_file = BackupPC::SearchLib::getGzipName($row->{'host'}, $row->{'share'}, $row->{'num'}); |
417 |
|
|
418 |
print "# size: $size backup.size: ", $row->{inc_size},"\n" if ($opt{d}); |
print "# size: $size backup.size: ", $row->{inc_size},"\n" if ($opt{d}); |
419 |
|
|
420 |
if ( $row->{'inc_size'} != -1 && $size != -1 && $row->{'inc_size'} >= $size) { |
if ( $row->{'inc_size'} != -1 && $size != -1 && $row->{'inc_size'} >= $size && $row->{parts} == $row->{backup_parts}) { |
421 |
if ($check) { |
if ($check) { |
422 |
tar_check($row->{'host'}, $row->{'share'}, $row->{'num'}, $tar_file) && next; |
tar_check($row->{'host'}, $row->{'share'}, $row->{'num'}, $tar_file) && next; |
423 |
} else { |
} else { |
425 |
} |
} |
426 |
} |
} |
427 |
|
|
428 |
print curr_time, " creating $curr_backup/$num_backups ", $row->{'host'}, ":", $row->{'share'}, " #", $row->{'num'}, " -> $tar_file"; |
print curr_time, " creating $curr_backup/$num_backups ", $row->{host}, ":", $row->{share}, " #", $row->{num}, |
429 |
|
" ", strftime('%Y-%m-%d', localtime($row->{date})), " -> $tar_file"; |
430 |
|
|
431 |
my $t = time(); |
my $t = time(); |
432 |
|
|