Source code of Windows XP (NT5)

@rem = '--*-Perl-*--
@echo off
if "%OS%" == "Windows_NT" goto WinNT
perl -x -S "%0" %1 %2 %3 %4 %5 %6 %7 %8 %9
goto endofperl
:WinNT
perl -x -S "%0" %*
if NOT "%COMSPEC%" == "%SystemRoot%\system32\cmd.exe" goto endofperl
if %errorlevel% == 9009 echo You do not have Perl in your PATH.
goto endofperl
@rem ';
#!perl -w
#line 14
#line 18
=head1 NAME

lwp-rget - Retrieve WWW documents recursively

=head1 SYNOPSIS

 lwp-rget [--verbose] [--auth=USER:PASS] [--depth=N] [--hier] [--iis]
          [--keepext=mime/type[,mime/type]] [--limit=N] [--nospace]
          [--prefix=URL] [--referer=URL] [--sleep=N] [--tolower] <URL>

 lwp-rget --version

=head1 DESCRIPTION

This program will retrieve a document and store it in a local file.  It
will follow any links found in the document and store these documents
as well, patching links so that they refer to these local copies.
This process continues until there are no more unvisited links or the
process is stopped by one or more of the limits which can be
controlled by the command line arguments.

This program is useful if you want to make a local copy of a
collection of documents or want to do web reading off-line.

All documents are stored as plain files in the current directory. The
file names chosen are derived from the last component of URL paths.
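
For example, the following illustrative invocation fetches
C<http://www.sn.no/foo/bar.html> together with the documents it links
to, up to two levels deep, into the current directory:

 lwp-rget --depth=2 http://www.sn.no/foo/bar.html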

The options are:

=over 3

=item --auth=USER:PASS

Set the authentication credentials to user "USER" and password "PASS" if
any restricted parts of the web site are hit.  If there are restricted
parts of the web site and authentication credentials are not available,
those pages will not be downloaded.

=item --depth=I<n>

Limit the recursive level.  Embedded images are always loaded, even if
they fall outside the I<--depth>.  This means that one can use
I<--depth=0> in order to fetch a single document together with all
inline graphics.

The default depth is 5.

=item --hier

Download files into a hierarchy that mimics the web site structure.
The default is to put all files in the current directory.

=item --referer=I<URI>

Set the value of the Referer header for the initial request.  The
special value C<"NONE"> can be used to suppress the Referer header in
any subsequent requests.

=item --iis

Sends an "Accept: */*" on all URL requests as a workaround for a bug in
IIS 2.0.  If no Accept MIME header is present, IIS 2.0 returns with a
"406 No acceptable objects were found" error.  Also converts any
backslashes (\\) in URLs to forward slashes (/).

=item --keepext=I<mime/type[,mime/type]>

Keeps the current extension for the listed MIME types.  Useful when
downloading text/plain documents that shouldn't all be translated to
*.txt files.

=item --limit=I<n>

Limit the number of documents to get.  The default limit is 50.

=item --nospace

Changes spaces in all URLs to underscore characters (_).  Useful when
downloading files from sites serving URLs with spaces in them.  Does not
remove spaces from fragments, e.g., "file.html#somewhere in here".

=item --prefix=I<url_prefix>

Limit the links to follow.  Only URLs that start with the prefix string
are followed.

The default prefix is set as the "directory" of the initial URL to
follow.  For instance if we start lwp-rget with the URL
C<http://www.sn.no/foo/bar.html>, then prefix will be set to
C<http://www.sn.no/foo/>.

Use C<--prefix=''> if you don't want the fetching to be limited by any
prefix.  See the example following this option list.

=item --sleep=I<n>

Sleep I<n> seconds before retrieving each document.  This option allows
you to go slowly, so as not to load the server you are visiting too
much.

=item --tolower

Translates all links to lowercase.  Useful when downloading files from
IIS since it does not serve files in a case-sensitive manner.

=item --verbose

Make more noise while running.

=item --quiet

Don't make any noise.

=item --version

Print program version number and quit.

=item --help

Print the usage message and quit.

=back
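
As an illustration of how the options combine, the following command
(using the same example URL as above) mirrors the pages into a
directory hierarchy, follows links anywhere below the host rather than
only below F</foo/>, and pauses one second between requests:

 lwp-rget --hier --sleep=1 --prefix=http://www.sn.no/ http://www.sn.no/foo/bar.html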

Before the program exits, the name of the file where the initial URL
is stored is printed on stdout.  All used filenames are also printed
on stderr as they are loaded.  This printing can be suppressed with
the I<--quiet> option.

=head1 SEE ALSO

L<lwp-request>, L<LWP>

=head1 AUTHOR

Gisle Aas <[email protected]>

=cut

use strict;

use Getopt::Long    qw(GetOptions);
use URI::URL        qw(url);
use LWP::MediaTypes qw(media_suffix);
use HTML::Entities  ();

use vars qw($VERSION);
use vars qw($MAX_DEPTH $MAX_DOCS $PREFIX $REFERER $VERBOSE $QUIET $SLEEP $HIER $AUTH $IIS $TOLOWER $NOSPACE %KEEPEXT);

my $progname = $0;
$progname =~ s|.*/||;    # only basename left
$progname =~ s/\.\w*$//; # strip extension, if any

$VERSION = sprintf("%d.%02d", q$Revision: 1.19 $ =~ /(\d+)\.(\d+)/);

#$Getopt::Long::debug = 1;
#$Getopt::Long::ignorecase = 0;

# Defaults
$MAX_DEPTH = 5;
$MAX_DOCS  = 50;

GetOptions('version'   => \&print_version,
           'help'      => \&usage,
           'depth=i'   => \$MAX_DEPTH,
           'limit=i'   => \$MAX_DOCS,
           'verbose!'  => \$VERBOSE,
           'quiet!'    => \$QUIET,
           'sleep=i'   => \$SLEEP,
           'prefix:s'  => \$PREFIX,
           'referer:s' => \$REFERER,
           'hier'      => \$HIER,
           'auth=s'    => \$AUTH,
           'iis'       => \$IIS,
           'tolower'   => \$TOLOWER,
           'nospace'   => \$NOSPACE,
           'keepext=s' => \$KEEPEXT{'OPT'},
          ) || usage();

sub print_version {
    require LWP;
    my $DISTNAME = 'libwww-perl-' . LWP::Version();
    print <<"EOT";
This is lwp-rget version $VERSION ($DISTNAME)
Copyright 1996-1998, Gisle Aas.
This program is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.
EOT
    exit 0;
}

my $start_url = shift || usage();
usage() if @ARGV;

require LWP::UserAgent;
my $ua = new LWP::UserAgent;
$ua->agent("$progname/$VERSION " . $ua->agent);
$ua->env_proxy;

unless (defined $PREFIX) {
    $PREFIX = url($start_url);   # limit to URLs below this one
    eval {
        $PREFIX->eparams(undef);
        $PREFIX->equery(undef);
    };

    $_ = $PREFIX->epath;
    s|[^/]+$||;
    $PREFIX->epath($_);
    $PREFIX = $PREFIX->as_string;
}
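
# For example, with a start URL of http://www.sn.no/foo/bar.html the
# block above drops the params, the query and the last path segment,
# leaving $PREFIX = "http://www.sn.no/foo/".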

%KEEPEXT = map { lc($_) => 1 } split(/\s*,\s*/, ($KEEPEXT{'OPT'} || 0));

my $SUPPRESS_REFERER;
$SUPPRESS_REFERER++ if ($REFERER || "") eq "NONE";

print <<"" if $VERBOSE;
START     = $start_url
MAX_DEPTH = $MAX_DEPTH
MAX_DOCS  = $MAX_DOCS
PREFIX    = $PREFIX

my $no_docs = 0;
my %seen = ();     # mapping from URL => local_file

my $filename = fetch($start_url, undef, $REFERER);
print "$filename\n" unless $QUIET;

sub fetch
{
    my($url, $type, $referer, $depth) = @_;

    # Fix http://sitename.com/../blah/blah.html to
    #     http://sitename.com/blah/blah.html
    $url = $url->as_string if (ref($url));
    while ($url =~ s#(https?://[^/]+/)\.\.\/#$1#) {}

    # Fix backslashes (\) in URL if $IIS defined
    $url = fix_backslashes($url) if (defined $IIS);

    $url = url($url) unless ref($url);
    $type ||= 'a';
    # Might be the background attribute
    $type = 'img' if ($type eq 'body' || $type eq 'td');
    $depth ||= 0;

    # Print the URL before we start checking...
    my $out = (" " x $depth) . $url . " ";
    $out .= "." x (60 - length($out));
    print STDERR $out . " " if $VERBOSE;

    # Can't get mailto things
    if ($url->scheme eq 'mailto') {
        print STDERR "*skipping mailto*\n" if $VERBOSE;
        return $url->as_string;
    }

    # The $plain_url is a URL without the fragment part
    my $plain_url = $url->clone;
    $plain_url->frag(undef);

    # Check PREFIX, but not for <IMG ...> links
    if ($type ne 'img' and $url->as_string !~ /^\Q$PREFIX/o) {
        print STDERR "*outsider*\n" if $VERBOSE;
        return $url->as_string;
    }

    # Translate URL to lowercase if $TOLOWER defined
    $plain_url = to_lower($plain_url) if (defined $TOLOWER);

    # If we already have it, then there is nothing to be done
    my $seen = $seen{$plain_url->as_string};
    if ($seen) {
        my $frag = $url->frag;
        $seen .= "#$frag" if defined($frag);
        $seen = protect_frag_spaces($seen);
        print STDERR "$seen (again)\n" if $VERBOSE;
        return $seen;
    }

    # Too much or too deep
    if ($depth > $MAX_DEPTH and $type ne 'img') {
        print STDERR "*too deep*\n" if $VERBOSE;
        return $url;
    }
    if ($no_docs > $MAX_DOCS) {
        print STDERR "*too many*\n" if $VERBOSE;
        return $url;
    }

    # Fetch document
    $no_docs++;
    sleep($SLEEP) if $SLEEP;
    my $req = HTTP::Request->new(GET => $url);
    # See: http://ftp.sunet.se/pub/NT/mirror-microsoft/kb/Q163/7/74.TXT
    $req->header('Accept', '*/*') if (defined $IIS);  # GIF/JPG from IIS 2.0
    $req->authorization_basic(split(/:/, $AUTH)) if (defined $AUTH);
    $req->referer($referer) if $referer && !$SUPPRESS_REFERER;
    my $res = $ua->request($req);

    # Check outcome
    if ($res->is_success) {
        my $doc = $res->content;
        my $ct = $res->content_type;
        my $name = find_name($res->request->url, $ct);
        print STDERR "$name\n" unless $QUIET;
        $seen{$plain_url->as_string} = $name;

        # If the file is HTML, then we look for internal links
        if ($ct eq "text/html") {
            # Save an unprocessed version of the HTML document.  This
            # both reserves the name used, and it also ensures that we
            # don't lose everything if this program is killed before
            # we finish.
            save($name, $doc);
            my $base = $res->base;

            # Follow and substitute links...
            $doc =~
                s/
                  (
                    <(img|a|body|area|frame|td)\b  # some interesting tag
                    [^>]+                          # still inside tag (not strictly correct)
                    \b(?:src|href|background)      # some link attribute
                    \s*=\s*                        # =
                  )
                  (?:                              # scope of OR-ing
                    (")([^"]*)"  |                 # value in double quotes  OR
                    (')([^']*)'  |                 # value in single quotes  OR
                    ([^\s>]+)                      # quoteless value
                  )
                /
                  new_link($1, lc($2), $3||$5, HTML::Entities::decode($4||$6||$7),
                           $base, $name, "$url", $depth+1)
                /giex;
            # XXX
            # The regular expression above is not strictly correct.
            # It is not really possible to parse HTML with a single
            # regular expression, but it is faster.  Tags that might
            # confuse us include:
            #    <a alt="href" href=link.html>
            #    <a alt=">" href="link.html">
            #
        }
        save($name, $doc);
        return $name;
    } else {
        print STDERR $res->code . " " . $res->message . "\n" if $VERBOSE;
        $seen{$plain_url->as_string} = $url->as_string;
        return $url->as_string;
    }
}
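
# new_link() is the callback for the s///e substitution in fetch()
# above: it fetches the linked document in turn and returns the
# captured tag prefix with the link value replaced by the name of the
# local copy, made relative to the file being patched.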

sub new_link
{
    my($pre, $type, $quote, $url, $base, $localbase, $referer, $depth) = @_;

    $url = protect_frag_spaces($url);

    $url = fetch(url($url, $base)->abs, $type, $referer, $depth);
    $url = url("file:$url", "file:$localbase")->rel
        unless $url =~ /^[.+\-\w]+:/;

    $url = unprotect_frag_spaces($url);

    return $pre . $quote . $url . $quote;
}
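
# Encode spaces in the fragment part of a URL as %20, e.g.
# "file.html#somewhere in here" becomes
# "file.html#somewhere%20in%20here", so the fragment survives the
# URL-object round trips in new_link(); unprotect_frag_spaces()
# below undoes the encoding.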

sub protect_frag_spaces
{
    my ($url) = @_;

    $url = $url->as_string if (ref($url));

    if ($url =~ m/^([^#]*#)(.+)$/)
    {
        my ($base, $frag) = ($1, $2);
        $frag =~ s/ /%20/g;
        $url = $base . $frag;
    }

    return $url;
}

sub unprotect_frag_spaces
{
    my ($url) = @_;

    $url = $url->as_string if (ref($url));

    if ($url =~ m/^([^#]*#)(.+)$/)
    {
        my ($base, $frag) = ($1, $2);
        $frag =~ s/%20/ /g;
        $url = $base . $frag;
    }

    return $url;
}
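
# Convert backslashes, both the literal "\" and the URL-encoded %5C
# form, to forward slashes in the part of the URL before any
# #fragment; used for the --iis workaround, e.g.
# "http://host/dir\page.html" becomes "http://host/dir/page.html".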

sub fix_backslashes
{
    my ($url) = @_;
    my ($base, $frag);

    $url = $url->as_string if (ref($url));

    if ($url =~ m/([^#]+)(#.*)/)
    {
        ($base, $frag) = ($1, $2);
    }
    else
    {
        $base = $url;
        $frag = "";
    }

    $base =~ tr/\\/\//;
    $base =~ s/%5[cC]/\//g;    # URL-encoded backslash is %5C

    return $base . $frag;
}
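
# Lowercase everything before the #fragment (the fragment itself is
# left untouched); used for --tolower, e.g.
# "http://host/Dir/Page.HTML#Frag" becomes
# "http://host/dir/page.html#Frag".  Returns the same kind of value
# (string or URL object) that it was given.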

sub to_lower
{
    my ($url) = @_;
    my $was_object = 0;

    if (ref($url))
    {
        $url = $url->as_string;
        $was_object = 1;
    }

    if ($url =~ m/([^#]+)(#.*)/)
    {
        $url = lc($1) . $2;
    }
    else
    {
        $url = lc($url);
    }

    if ($was_object == 1)
    {
        return url($url);
    }
    else
    {
        return $url;
    }
}
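
# Map spaces, including the URL-encoded %20 form, to underscores in
# the part of the URL before any #fragment, trimming leading and
# trailing spaces first; used for --nospace, e.g. "my page.html#a b"
# becomes "my_page.html#a b".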

sub translate_spaces
{
    my ($url) = @_;
    my ($base, $frag);

    $url = $url->as_string if (ref($url));

    if ($url =~ m/([^#]+)(#.*)/)
    {
        ($base, $frag) = ($1, $2);
    }
    else
    {
        $base = $url;
        $frag = "";
    }

    $base =~ s/^ *//;      # Remove initial spaces from base
    $base =~ s/ *$//;      # Remove trailing spaces from base

    $base =~ tr/ /_/;
    $base =~ s/%20/_/g;    # URL-encoded space is %20

    return $base . $frag;
}
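
# Create a directory path one component at a time, like "mkdir -p";
# e.g. mkdirp("a/b/c", 0775) creates a, a/b and a/b/c as needed, and
# returns true only if every mkdir that was attempted succeeded.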

sub mkdirp
{
    my($directory, $mode) = @_;
    my @dirs = split(/\//, $directory);
    my $path = shift(@dirs);   # build it as we go
    my $result = 1;            # assume it will work

    unless (-d $path) {
        $result &&= mkdir($path, $mode);
    }

    foreach (@dirs) {
        $path .= "/$_";
        if (! -d $path) {
            $result &&= mkdir($path, $mode);
        }
    }

    return $result;
}
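
# Derive a local file name from the URL path and the response MIME
# type, making it unique with a numeric infix when the plain name is
# already taken; e.g. http://host/dir/page.html fetched as text/html
# maps to "page.html", then "page001.html", "page002.html", ...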

sub find_name
{
    my($url, $type) = @_;
    #print "find_name($url, $type)\n";

    # Translate spaces in URL to underscores (_) if $NOSPACE defined
    $url = translate_spaces($url) if (defined $NOSPACE);

    # Translate URL to lowercase if $TOLOWER defined
    $url = to_lower($url) if (defined $TOLOWER);

    $url = url($url) unless ref($url);

    my $path = $url->path;

    # trim path until only the basename is left
    $path =~ s|(.*/)||;
    my $dirname = ".$1";
    if (!$HIER) {
        $dirname = "";
    } elsif (! -d $dirname) {
        mkdirp($dirname, 0775);
    }

    my $extra = "";    # something to make the name unique
    my $suffix;

    if ($KEEPEXT{lc($type)}) {
        $suffix = ($path =~ m/\.(.*)/) ? $1 : "";
    } else {
        $suffix = media_suffix($type);
    }

    $path =~ s|\..*||;    # trim suffix
    $path = "index" unless length $path;

    while (1) {
        # Construct a new file name
        my $file = $dirname . $path . $extra;
        $file .= ".$suffix" if $suffix;
        # Check if it is unique
        return $file unless -f $file;

        # Try something extra
        unless ($extra) {
            $extra = "001";
            next;
        }
        $extra++;
    }
}

sub save
{
    my $name = shift;
    #print "save($name,...)\n";
    open(FILE, ">$name") || die "Can't save $name: $!";
    binmode FILE;
    print FILE $_[0];
    close(FILE);
}

sub usage
{
    die <<"";
Usage: $progname [options] <URL>
Allowed options are:
  --auth=USER:PASS  Set authentication credentials for web site
  --depth=N         Maximum depth to traverse (default is $MAX_DEPTH)
  --hier            Download into hierarchy (not all files into cwd)
  --referer=URI     Set initial referer header (or "NONE")
  --iis             Workaround IIS 2.0 bug by sending "Accept: */*" MIME
                    header; translates backslashes (\\) to forward slashes (/)
  --keepext=type    Keep file extension for MIME types (comma-separated list)
  --limit=N         A limit on the number of documents to get (default is $MAX_DOCS)
  --nospace         Translate spaces in URLs (not #fragments) to underscores (_)
  --version         Print version number and quit
  --verbose         More output
  --quiet           No output
  --sleep=SECS      Sleep between gets, i.e. go slowly
  --prefix=PREFIX   Limit URLs to follow to those which begin with PREFIX
  --tolower         Translate all URLs to lowercase (useful with IIS servers)

}

__END__
:endofperl