
Non-existent files were causing this, and the db, to fail; there was also a die,
and now there is a check to avoid this.
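
For context, the fix is to test that each *.list file actually exists before opening it, instead of letting open() die on a missing file and taking the whole db build down with it. A minimal sketch of that guard, using a made-up directory and package names rather than fastswim's real arguments:

    #!/usr/bin/perl -w
    use strict;

    # Hypothetical inputs, for illustration only.
    my $listdir  = "/var/lib/dpkg/info";
    my @packages = ("foo.list", "bar.list", "gone.list");

    foreach my $name (@packages) {
        my $path = "$listdir/$name";
        # The check this commit introduces, in miniature: skip or warn
        # on a missing file instead of calling open(...) or die.
        if (-e $path) {
            open(my $fh, '<', $path) or warn "could not read $path: $!";
            # ... read and index the list here ...
            close($fh) if $fh;
        }
    }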
master
freesource 20 years ago
commit f8f6630c0f

bin/fastswim: 184 lines changed

@@ -52,12 +52,13 @@ my %version;
 #$#name = 2000;
 #$#ppackage = 2000;
 # This way has been de-pre-c-whatever-ated because it lacks version
 # rememberance, and is just kept for testing purposes
 if ($#ARGV == -1) {
 print STDERR "swim: fastswim requires option/arguments, see program for instructions\n";
 exit;
 chdir("$ARGV[1]");
 #consider readdir
 @ppackage = <*.list>;
 }
@@ -76,100 +77,105 @@ my %version;
 }
 close(TRANSFER);
 }
 # Make a nice md. I decided on a Hash of Lists, giving all
 # files/dirs unique name, and then a list of packages which
 # correspond..because this should be faster than a Hash of Hash
 # where you'd have to loop through all packages names..find the
 # files/dir in all packages names which are the same..I'd assume a HL
 # would be a quicker query, even though the Hash would be enormous.
 # Possible things: a tree for faster query.
 # Put everything into an array..every other is package name
 # Better check for packages which don't have /. in their *.list...
 # which is rare, but does happen. Sometimes *.list(s) don't have
 # all the parent directories, but we won't worry about that.
 print STDERR " Making the massive hash\n";
 $| = 1; my $x = 1;
 foreach $thingy (sort @ppackage) {
-open(LIST, "$ARGV[1]/$thingy") or die "Humm, strange";
+if (-e "$ARGV[1]/$thingy") {
+open(LIST, "$ARGV[1]/$thingy");
 # Because of the version..there are sometimes dots
 $thingy =~ m,(.*)\.list,;
 my $count = 0;
 my @count = <LIST>;
 close(LIST);
 foreach (@count) {
 $x = 1 if $x == 6;
 print STDERR "|\r" if $x == 1 || $x == 4; print STDERR "/\r" if $x == 2;
 print STDERR "-\r" if $x == 3 || $x == 6; print STDERR "\\\r" if $x == 5;
 $x++;
 chomp $_;
 # does /. exist? it should be first.
 if ($count == 0) {
 if ($_ !~ m,\/\.,) {
 my $shifter = $_;
 my @redolist = @count;
 push(@count,$shifter);
 # humm let's rebuild the offending backup list, this
 # is important for --db.
 unshift(@redolist,"/.");
 open(REDOLIST, ">$ARGV[3]/$ARGV[1]/backup/$thingy.bk.bk")
 or warn "needed to edit $thingy because it lacked /.,
 but could not open up a backup file
 $ARGV[3]/$ARGV[1]/backup/$thingy.bk.bk\n";
 my $rd;
 foreach $rd (@redolist) {
 chomp $rd;
 print REDOLIST "$rd\n";
 }
 close(REDOLIST);
 rename
 ("$ARGV[3]/$ARGV[1]/backup/$thingy.bk.bk","$ARGV[3]/$ARGV[1]/backup/$thingy.bk");
 $_ = "/.";
 }
 }
 $count = 1;
 $repeaters{$_}++;
 if ($repeaters{$_} == 1) {
 $temp = 0;
 }
 else {
 $temp = $repeaters{$_} - 1;
 }
 if (defined $version{$1}) {
 $HL{$_}[$temp] = "$1_$version{$1}";
 }
 }
 }
+}
 undef @ppackage;
 # We will create one file with the 1..and another with >1..
 # than split..reverse..and order.accordingly..this makes
 # things much faster. Remember clean-up routines for kill.
 print STDERR " Starting ... writing to $ARGV[2]!\n";
 # Create the database
-open(BIG, ">$ARGV[2]/big.debian") or die;
-open(LONG, ">$ARGV[2]/long.debian") or die;
+open(BIG, ">$ARGV[2]/big.debian") or die "Couldn't find big.debian\n";
+open(LONG, ">$ARGV[2]/long.debian") or die "Couldn't find long.debian\n";
 foreach $thingy (sort keys %HL ) {
 $x = 1 if $x == 6;
 print STDERR "|\r" if $x == 1 || $x == 4; print STDERR "/\r" if $x == 2;
 print STDERR "-\r" if $x == 3 || $x == 6; print STDERR "\\\r" if $x == 5;
 $x++;
 # Humm, will split or grep be faster?
 #my $tingy = "@{ $HL{$thingy} }" . " " . @{ $HL{$thingy} };
 my $tingy = "@{ $HL{$thingy} }";
 if (@{ $HL{$thingy} } > 1 || @{ $HL{$thingy} } eq "") {
 print LONG "$thingy -> $tingy\n";
 }
 elsif (@{ $HL{$thingy} } == 1) {
 print BIG "$thingy -> $tingy\n";
 }
 }
 #print "Finished\n";
 close(BIG);
 close(LONG);
 #undef %HL;
 print STDERR " Cleaning up\n";
 __END__
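
The comment block in this hunk describes the central data structure: a hash of lists, keyed on each file or directory name, whose value is the list of packages that contain it; entries with exactly one owning package are written to big.debian and the rest to long.debian. A rough standalone sketch of that idea, using invented package data and printing to stdout instead of the real database files:

    #!/usr/bin/perl -w
    use strict;

    # Hypothetical data: file/dir name => packages that ship it.
    my %HL;
    push @{ $HL{"/usr/bin/perl"} },  "perl_5.8";
    push @{ $HL{"/usr/share/doc"} }, "perl_5.8", "bash_2.05";

    foreach my $path (sort keys %HL) {
        my @owners = @{ $HL{$path} };
        if (@owners == 1) {
            # exactly one owner: the common, fast case (big.debian in fastswim)
            print "BIG  $path -> @owners\n";
        }
        else {
            # shared (or empty) entries go to the slower file (long.debian in fastswim)
            print "LONG $path -> @owners\n";
        }
    }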
