网页爬虫的一个类模块。
来源:互联网 发布:禁用usb接口软件 编辑:程序博客网 时间:2024/05/22 15:10
其中ImageUrls返回分析得到的图片链接。LinkUrls返回分析得到的网页链接。BodyText返回去掉标记的网页正文部分。TitleText返回网页的标题。
{
A Class for Page Analysis
Author: Liu LIU
Mail: Geo(DOT)Cra(AT)Gmail(DOT)com
Web: http://www.aivisoft.net/
}
unit Crawl;
interface
uses
Math, Windows, SysUtils, Variants, Classes, unitypes;
const
{ Number of hash buckets in TCrawler.LinkPool; must stay equal to the
  LinkPool array bound ($FF) + 1. }
PoolSize: longint = $100;
{ Entity-decoding tables used by GetPlainText: SourceTags1 holds the
  semicolon-terminated HTML entities, SourceTags2 the same entities
  without the trailing semicolon, DestTags the replacement characters.
  Fix: the scraped copy of this source had the entity literals themselves
  rendered by the browser (e.g. '&lt;' displayed as '<'), which turned
  most replacements into no-ops; the surviving '&trade'/'&ensp'/'&emsp'
  remnants in SourceTags2 show the intended pattern, restored here.
  NOTE(review): the '?' placeholders in DestTags may themselves be
  mangled registered/copyright/trademark characters - confirm against
  the original source. }
SourceTags1: array[0..9] of string = ('&lt;', '&gt;', '&amp;', '&quot;', '&reg;',
'&copy;', '&trade;', '&ensp;', '&emsp;', '&nbsp;');
SourceTags2: array[0..9] of string = ('&lt', '&gt', '&amp', '&quot', '&reg',
'&copy', '&trade', '&ensp', '&emsp', '&nbsp');
DestTags: array[0..9] of Char = ('<', '>', '&', '"', '?', '?', '?', ' ', ' ', ' ');
type
{ Analyses one fetched HTML page: collects the de-duplicated link and
  image URLs it references and exposes the plain-text title and body.
  Call Init before first use; Destroy frees everything Init allocated. }
TCrawler = class
private
{ Hash buckets (index = Hash(url)) holding every URL seen so far, used
  to de-duplicate across PageAnalysis calls; bound must stay PoolSize-1. }
LinkPool: array[0..$FF] of TStringList;
{ Bucket index for a URL: character-ordinal sum mod PoolSize. }
function Hash(S: string): longint;
{ HTML fragment -> plain text (tags, scripts, styles stripped, entities
  decoded, whitespace collapsed). }
function GetPlainText(S: string): string;
{ First 1-based position of T in S at or after Start; 0 when absent. }
function NaiveMatch(S, T: string; Start, LengthOfS, LengthOfT: longint): longint;
public
{ Results of the most recent PageAnalysis call. }
ImageUrls, LinkUrls: TStringList;
BodyText, TitleText: string;
procedure Init;
procedure PageAnalysis(SrcHTML, SrcUrl: string);
{ Persistence of the de-duplication pool; both return false on error. }
function LoadFromFile(FileName: string): boolean;
function SaveToFile(FileName: string): boolean;
destructor Destroy; override;
end;
implementation
function TCrawler.Hash(S: string): longint;
{ Bucket index for the LinkPool de-duplication table: the sum of all
  character ordinals of S, reduced modulo PoolSize. }
var
Index, Sum: longint;
begin
Sum := 0;
for Index := 1 to Length(S) do
Sum := Sum + Ord(S[Index]);
Result := Sum mod PoolSize;
end;
function TCrawler.GetPlainText(S: string): string;
{ Reduces an HTML fragment to plain text: strips existing line breaks,
  re-inserts them after block-level closing tags, removes
  <script>/<style> sections and every remaining tag, decodes entities
  via the SourceTags/DestTags tables and collapses runs of whitespace.
  The removal passes work in place: kept text is shifted left with Move
  and the string truncated afterwards. }
var
i, j, k, l, LengthOfS, t1, t2, y1, y2: longint;
Cr, Lf, Tab: Char;
Flags, HasSpace: boolean;
LowerS, NewS: string;
begin
Cr := Chr(13); Lf := Chr(10); Tab := Chr(9);
{ Pass 1: drop every CR/LF by in-place compaction (i = read cursor,
  k = write cursor). }
k := 1; i := 1; LengthOfS := Length(S); NewS := S;
while i <= LengthOfS do begin
Flags := false;
while (NewS[i] = Cr) or (NewS[i] = Lf) do begin
Inc(i); Flags := i > LengthOfS;
if Flags then break;
end;
if not Flags then NewS[k] := NewS[i] else Dec(k);
Inc(i); Inc(k);
end;
setlength(NewS, k - 1);
{Clear enters in page}
{ Pass 2: closing block tags become CRLF so paragraph breaks survive
  the tag stripping below. }
NewS := StringReplace(NewS, '</p>', Chr(13) + Chr(10), [rfReplaceAll, rfIgnoreCase]);
NewS := StringReplace(NewS, '<br>', Chr(13) + Chr(10), [rfReplaceAll, rfIgnoreCase]);
NewS := StringReplace(NewS, '</div>', Chr(13) + Chr(10), [rfReplaceAll, rfIgnoreCase]);
{ Pass 3: delete <script>...</script> blocks.  Each iteration copies the
  text between one '</script>' and the next '<script' down to write
  cursor i.  NOTE(review): an unclosed '<script' discards everything
  after it, and a '</script>' with no opener would Move to NewS[0] -
  confirm inputs are always well-formed HTML. }
LowerS := LowerCase(NewS); LengthOfS := Length(NewS);
k := NaiveMatch(LowerS, '<script', 1, LengthOfS, 7); i := k;
l := NaiveMatch(LowerS, '</script>', k + 7, LengthOfS, 9);
while l > 0 do begin
l := l + 9;
k := NaiveMatch(LowerS, '<script', l, LengthOfS, 7);
if k = 0 then k := LengthOfS + 1;
Move(NewS[l], NewS[i], k - l);
i := i + k - l;
l := NaiveMatch(LowerS, '</script>', k + 7, LengthOfS, 9);
end;
if i > 0 then setlength(NewS, i - 1);
{Clearup scripts}
{ Pass 4: same in-place compaction for <style>...</style> blocks. }
LowerS := LowerCase(NewS); LengthOfS := Length(NewS);
k := NaiveMatch(LowerS, '<style', 1, LengthOfS, 6); i := k;
l := NaiveMatch(LowerS, '</style>', k + 6, LengthOfS, 8);
while l > 0 do begin
l := l + 8;
k := NaiveMatch(LowerS, '<style', l, LengthOfS, 6);
if k = 0 then k := LengthOfS + 1;
Move(NewS[l], NewS[i], k - l);
i := i + k - l;
l := NaiveMatch(LowerS, '</style>', k + 6, LengthOfS, 8);
end;
if i > 0 then setlength(NewS, i - 1);
{Clearup style code}
{ Pass 5: strip every remaining <...> tag.  The repeat loop advances l
  past any '>' that sits inside an odd number of '"' or single quotes
  between k and l, so '>' inside attribute values does not end a tag. }
LowerS := LowerCase(NewS); LengthOfS := Length(NewS);
k := NaiveMatch(LowerS, '<', 1, LengthOfS, 1); i := k;
l := NaiveMatch(LowerS, '>', k + 1, LengthOfS, 1);
while l > 0 do begin
repeat
t1 := 0; t2 := 0;
for j := k to l do begin
if LowerS[j] = '"' then Inc(t1);
if LowerS[j] = '''' then Inc(t2);
end;
y1 := t1 mod 2; y2 := t2 mod 2;
if (y1 > 0) or (y2 > 0) then
l := NaiveMatch(LowerS, '>', l + 1, LengthOfS, 1);
until (l = 0) or ((y1 = 0) and (y2 = 0));
if l = 0 then break;
{ignore the > in "..." or '....'}
l := l + 1;
k := NaiveMatch(LowerS, '<', l, LengthOfS, 1);
if k = 0 then k := LengthOfS + 1;
Move(NewS[l], NewS[i], k - l);
i := i + k - l;
l := NaiveMatch(LowerS, '>', k + 1, LengthOfS, 1);
end;
if i > 0 then setlength(NewS, i - 1);
{Clear control code in <>}
{ Decode HTML entities (with and without trailing semicolon) using the
  parallel SourceTags/DestTags tables. }
for i := 0 to 9 do begin
NewS := StringReplace(NewS, SourceTags1[i], DestTags[i], [rfReplaceAll, rfIgnoreCase]);
NewS := StringReplace(NewS, SourceTags2[i], DestTags[i], [rfReplaceAll, rfIgnoreCase]);
end;
{replace marks}
{ NOTE(review): in the scraped copy this replaces a space with a space
  (a no-op); the original presumably replaced a non-breaking-space
  character here - confirm against the original source. }
NewS := StringReplace(NewS, ' ', ' ', [rfReplaceAll]);
{ Tabs become spaces, then runs of spaces collapse to a single space
  (same read/write-cursor compaction as pass 1). }
LengthOfS := Length(NewS);
for i := 1 to LengthOfS do if NewS[i] = Tab then NewS[i] := ' ';
k := 1; i := 1; LengthOfS := Length(NewS);
while i <= LengthOfS do begin
Flags := false; HasSpace := false;
while (NewS[i] = ' ') do begin
Inc(i); Flags := i > LengthOfS;
HasSpace := true;
if Flags then break;
end;
if HasSpace then Dec(i);
if not Flags then NewS[k] := NewS[i] else Dec(k);
Inc(i); Inc(k);
end;
setlength(NewS, k - 1);
{ Remove a single space left at a line start or line end. }
NewS := StringReplace(NewS, Lf + ' ', Lf, [rfReplaceAll]);
NewS := StringReplace(NewS, ' ' + Cr, Cr, [rfReplaceAll]);
{trim spaces and enters}
Result := Trim(NewS);
end;
function TCrawler.NaiveMatch(S, T: string; Start, LengthOfS, LengthOfT: longint): longint;
{ Naive substring search: returns the 1-based position of the first
  occurrence of T in S at or after Start, or 0 when there is none.
  Fix: the scan previously ran i up to LengthOfS, so a partial match
  starting near the end of S compared characters past the end of the
  string (S[k] with k > LengthOfS) - a range-check error with checking
  on and an undefined read without it.  The scan now stops at the last
  position where T can still fit, and a Start below 1 is clamped so no
  caller can index position 0. }
var
i, j: longint;
Success: boolean;
begin
Result := 0;
if Start < 1 then Start := 1;
for i := Start to LengthOfS - LengthOfT + 1 do begin
Success := true;
for j := 1 to LengthOfT do
if S[i + j - 1] <> T[j] then begin
Success := false;
break;
end;
if Success then begin
Result := i;
Exit;
end;
end;
end;
procedure TCrawler.Init;
{ Allocates the public result lists and every hash bucket of the URL
  de-duplication pool.  Buckets are kept sorted so IndexOf lookups in
  PageAnalysis stay fast. }
var
Bucket: longint;
begin
ImageUrls := TStringList.Create;
LinkUrls := TStringList.Create;
for Bucket := 0 to PoolSize - 1 do begin
LinkPool[Bucket] := TStringList.Create;
LinkPool[Bucket].Sorted := true;
end;
end;
procedure TCrawler.PageAnalysis(SrcHTML, SrcUrl: string);
{ Analyses one fetched page: fills LinkUrls and ImageUrls with absolute,
  de-duplicated URLs taken from '<a href=' and '<img src=' attributes,
  and sets TitleText / BodyText to the plain text of the <title> and
  <body> sections.  SrcUrl supplies the scheme, host and base path used
  to absolutise relative links. }
var
i, j, k, l, LengthOfHTML, HashCode: longint;
StrQuot, StrSpace, StrTriangle, StrQuot2, StrNewline, StrCross: longint;
RootUrl, HostName, LowerHTML, SubUrl, DestUrl, Header: string;
begin
ImageUrls.Clear; LinkUrls.Clear;
{ Split SrcUrl into scheme (Header), base path (RootUrl, trailing
  slashes removed) and host name (up to the first '/'). }
RootUrl := SrcUrl; Header := 'http://';
if LowerCase(Copy(RootUrl, 1, 6)) = 'ftp://' then begin
Delete(RootUrl, 1, 6);
Header := 'ftp://';
end;
if LowerCase(Copy(RootUrl, 1, 7)) = 'http://' then Delete(RootUrl, 1, 7);
if LowerCase(Copy(RootUrl, 1, 8)) = 'https://' then begin
Delete(RootUrl, 1, 8);
Header := 'https://';
end;
{ NOTE(review): if SrcUrl is empty or scheme-only, RootUrl is '' here
  and the loop condition indexes an empty string - confirm callers
  always pass a URL with a host part. }
while RootUrl[Length(RootUrl)] = '/' do begin
Delete(RootUrl, Length(RootUrl), 1);
if RootUrl = '' then break;
end;
if RootUrl = '' then Exit;
k := Pos('/', RootUrl);
if k > 0 then HostName := Copy(RootUrl, 1, k - 1) else HostName := RootUrl;
LengthOfHTML := Length(SrcHTML);
LowerHTML := LowerCase(SrcHTML);
{Parsing Links}
{ For each '<a href=', the URL runs to the nearest of: single quote,
  '>', space, '#' (this also crops fragment identifiers), double quote
  or newline.  l keeps that delimiter position and becomes the start of
  the next search. }
k := NaiveMatch(LowerHTML, '<a href=', 1, LengthOfHTML, 8);
while k > 0 do begin
k := k + 8; l := maxlongint;
StrQuot := NaiveMatch(LowerHTML, Chr(39), k + 1, LengthOfHTML, 1);
if (StrQuot < l) and (StrQuot > 0) then l := StrQuot;
StrTriangle := NaiveMatch(LowerHTML, '>', k, LengthOfHTML, 1);
if (StrTriangle < l) and (StrTriangle > 0) then l := StrTriangle;
StrSpace := NaiveMatch(LowerHTML, ' ', k, LengthOfHTML, 1);
if (StrSpace < l) and (StrSpace > 0) then l := StrSpace;
StrCross := NaiveMatch(LowerHTML, '#', k, LengthOfHTML, 1);
if (StrCross < l) and (StrCross > 0) then l := StrCross;
StrQuot2 := NaiveMatch(LowerHTML, '"', k + 1, LengthOfHTML, 1);
if (StrQuot2 < l) and (StrQuot2 > 0) then l := StrQuot2;
StrNewline := NaiveMatch(LowerHTML, Chr(10), k, LengthOfHTML, 1);
if (StrNewline < l) and (StrNewline > 0) then l := StrNewline;
if l < maxlongint then begin
{ Strip leading quotes, then absolutise: '/...' is host-relative,
  anything without a scheme is resolved against the base path. }
SubUrl := TrimRight(Copy(SrcHTML, k, l - k));
if SubUrl <> '' then begin
while SubUrl[1] = '"' do begin
Delete(SubUrl, 1, 1);
if SubUrl = '' then break;
end;
if SubUrl <> '' then begin
while SubUrl[1] = Chr(39) do begin
Delete(SubUrl, 1, 1);
if SubUrl = '' then break;
end;
if SubUrl <> '' then begin
if ('ftp://' = LowerCase(Copy(SubUrl, 1, 6))) or
('http://' = LowerCase(Copy(SubUrl, 1, 7))) or
('https://' = LowerCase(Copy(SubUrl, 1, 8))) then
DestUrl := SubUrl
else begin
if SubUrl[1] = '/' then
DestUrl := Header + HostName + SubUrl
else
DestUrl := Header + RootUrl + '/' + SubUrl;
end;
{ The hashed pool de-duplicates URLs across the whole crawl. }
HashCode := Hash(DestUrl);
if LinkPool[HashCode].IndexOf(DestUrl) = -1 then begin
LinkUrls.Add(DestUrl);
LinkPool[HashCode].Add(DestUrl);
{ Links that end like image files are also recorded as images. }
if (LowerCase(Copy(DestUrl, Length(DestUrl) - 3, 4)) = '.jpg') or
(LowerCase(Copy(DestUrl, Length(DestUrl) - 3, 4)) = '.bmp') or
(LowerCase(Copy(DestUrl, Length(DestUrl) - 4, 5)) = '.jpeg') then begin
ImageUrls.Add(DestUrl);
end;
end;
end;
end;
end;
k := NaiveMatch(LowerHTML, '<a href=', l, LengthOfHTML, 8);
end else break;
end;
{Parsing Image Links}
{ Same scan for '<img src='; note '#' is not a delimiter here, and only
  .jpg/.bmp/.jpeg sources are collected. }
k := NaiveMatch(LowerHTML, '<img src=', 1, LengthOfHTML, 9);
while k > 0 do begin
k := k + 9; l := maxlongint;
StrQuot := NaiveMatch(LowerHTML, Chr(39), k + 1, LengthOfHTML, 1);
if (StrQuot < l) and (StrQuot > 0) then l := StrQuot;
StrTriangle := NaiveMatch(LowerHTML, '>', k, LengthOfHTML, 1);
if (StrTriangle < l) and (StrTriangle > 0) then l := StrTriangle;
StrSpace := NaiveMatch(LowerHTML, ' ', k, LengthOfHTML, 1);
if (StrSpace < l) and (StrSpace > 0) then l := StrSpace;
StrQuot2 := NaiveMatch(LowerHTML, '"', k + 1, LengthOfHTML, 1);
if (StrQuot2 < l) and (StrQuot2 > 0) then l := StrQuot2;
StrNewline := NaiveMatch(LowerHTML, Chr(10), k, LengthOfHTML, 1);
if (StrNewline < l) and (StrNewline > 0) then l := StrNewline;
if l < maxlongint then begin
SubUrl := TrimRight(Copy(SrcHTML, k, l - k));
if SubUrl <> '' then begin
while SubUrl[1] = '"' do begin
Delete(SubUrl, 1, 1);
if SubUrl = '' then break;
end;
if SubUrl <> '' then begin
while SubUrl[1] = Chr(39) do begin
Delete(SubUrl, 1, 1);
if SubUrl = '' then break;
end;
if SubUrl <> '' then begin
if (LowerCase(Copy(SubUrl, Length(SubUrl) - 3, 4)) = '.jpg') or
(LowerCase(Copy(SubUrl, Length(SubUrl) - 3, 4)) = '.bmp') or
(LowerCase(Copy(SubUrl, Length(SubUrl) - 4, 5)) = '.jpeg') then begin
if ('ftp://' = LowerCase(Copy(SubUrl, 1, 6))) or
('http://' = LowerCase(Copy(SubUrl, 1, 7))) or
('https://' = LowerCase(Copy(SubUrl, 1, 8))) then
DestUrl := SubUrl
else begin
if SubUrl[1] = '/' then
DestUrl := Header + HostName + SubUrl
else
DestUrl := Header + RootUrl + '/' + SubUrl;
end;
HashCode := Hash(DestUrl);
if LinkPool[HashCode].IndexOf(DestUrl) = -1 then begin
ImageUrls.Add(DestUrl);
LinkPool[HashCode].Add(DestUrl);
end;
end;
end;
end;
end;
k := NaiveMatch(LowerHTML, '<img src=', l, LengthOfHTML, 9);
end else break;
end;
{Get Title Text}
TitleText := '';
k := NaiveMatch(LowerHTML, '<title>', 1, LengthOfHTML, 7);
if k > 0 then begin
k := k + 7;
l := NaiveMatch(LowerHTML, '</title>', k, LengthOfHTML, 8);
if l > 0 then
TitleText := Copy(SrcHTML, k, l - k);
end;
TitleText := GetPlainText(TitleText);
{Get Body Text}
{ NOTE(review): when '</body>' is missing, l = LengthOfHTML and the
  Copy below drops the final character of the document - confirm
  intended. }
BodyText := '';
k := NaiveMatch(LowerHTML, '<body', 1, LengthOfHTML, 5);
if k > 0 then begin
k := NaiveMatch(LowerHTML, '>', k + 5, LengthOfHTML, 1);
if k > 0 then begin
k := k + 1;
l := NaiveMatch(LowerHTML, '</body>', k, LengthOfHTML, 7);
if l = 0 then l := LengthOfHTML;
BodyText := Copy(SrcHTML, k, l - k);
end;
end;
BodyText := GetPlainText(BodyText);
end;
function TCrawler.LoadFromFile(FileName: string): boolean;
{ Restores the de-duplication pool written by SaveToFile: for each of
  the PoolSize buckets, a count line followed by that many URL lines.
  Returns true on success, false on any I/O or format error.
  Fixes: the original reassigned the global Input file, silently
  redirecting every plain ReadLn in the program to this file; it leaked
  the file handle when an exception fired (CloseFile sat inside the try
  with no finally); and it appended to buckets that already held
  entries, duplicating URLs when called twice. }
var
i, j, n: longint;
s: string;
F: TextFile;
begin
Result := false;
try
AssignFile(F, FileName);
Reset(F);
try
for i := 0 to PoolSize - 1 do begin
{ Reset each bucket so a reload replaces rather than accumulates. }
LinkPool[i].Clear;
ReadLn(F, n);
for j := 0 to n - 1 do begin
ReadLn(F, s);
LinkPool[i].Add(s);
end;
end;
Result := true;
finally
CloseFile(F);
end;
except
Result := false;
end;
end;
function TCrawler.SaveToFile(FileName: string): boolean;
{ Persists the de-duplication pool: for each bucket, its entry count
  followed by the entries themselves (the format LoadFromFile expects).
  Returns true on success, false on any I/O error.
  Fixes: the original reassigned the global Output file, so every plain
  WriteLn elsewhere in the program would have been redirected into this
  file, and the handle leaked when an exception fired after Rewrite
  (CloseFile sat inside the try with no finally). }
var
i, j: longint;
F: TextFile;
begin
Result := false;
try
AssignFile(F, FileName);
Rewrite(F);
try
for i := 0 to PoolSize - 1 do begin
WriteLn(F, LinkPool[i].Count);
for j := 0 to LinkPool[i].Count - 1 do
WriteLn(F, LinkPool[i].Strings[j]);
end;
Result := true;
finally
CloseFile(F);
end;
except
Result := false;
end;
end;
destructor TCrawler.Destroy;
{ Releases the result lists and every hash bucket allocated by Init,
  then runs the inherited destructor. }
var
Bucket: longint;
begin
LinkUrls.Free;
ImageUrls.Free;
for Bucket := 0 to PoolSize - 1 do
LinkPool[Bucket].Free;
inherited Destroy;
end;
end.
{
A Class for Page Analysis
Author: Liu LIU
Mail: Geo(DOT)Cra(AT)Gmail(DOT)com
Web: http://www.aivisoft.net/
}
unit Crawl;
interface
uses
Math, Windows, SysUtils, Variants, Classes, unitypes;
const
{ Number of hash buckets in TCrawler.LinkPool; must stay equal to the
  LinkPool array bound ($FF) + 1. }
PoolSize: longint = $100;
{ Entity-decoding tables used by GetPlainText: SourceTags1 holds the
  semicolon-terminated HTML entities, SourceTags2 the same entities
  without the trailing semicolon, DestTags the replacement characters.
  Fix: the scraped copy of this source had the entity literals themselves
  rendered by the browser (e.g. '&lt;' displayed as '<'), which turned
  most replacements into no-ops; the surviving '&trade'/'&ensp'/'&emsp'
  remnants in SourceTags2 show the intended pattern, restored here.
  NOTE(review): the '?' placeholders in DestTags may themselves be
  mangled registered/copyright/trademark characters - confirm against
  the original source. }
SourceTags1: array[0..9] of string = ('&lt;', '&gt;', '&amp;', '&quot;', '&reg;',
'&copy;', '&trade;', '&ensp;', '&emsp;', '&nbsp;');
SourceTags2: array[0..9] of string = ('&lt', '&gt', '&amp', '&quot', '&reg',
'&copy', '&trade', '&ensp', '&emsp', '&nbsp');
DestTags: array[0..9] of Char = ('<', '>', '&', '"', '?', '?', '?', ' ', ' ', ' ');
type
{ Analyses one fetched HTML page: collects the de-duplicated link and
  image URLs it references and exposes the plain-text title and body.
  Call Init before first use; Destroy frees everything Init allocated. }
TCrawler = class
private
{ Hash buckets (index = Hash(url)) holding every URL seen so far, used
  to de-duplicate across PageAnalysis calls; bound must stay PoolSize-1. }
LinkPool: array[0..$FF] of TStringList;
{ Bucket index for a URL: character-ordinal sum mod PoolSize. }
function Hash(S: string): longint;
{ HTML fragment -> plain text (tags, scripts, styles stripped, entities
  decoded, whitespace collapsed). }
function GetPlainText(S: string): string;
{ First 1-based position of T in S at or after Start; 0 when absent. }
function NaiveMatch(S, T: string; Start, LengthOfS, LengthOfT: longint): longint;
public
{ Results of the most recent PageAnalysis call. }
ImageUrls, LinkUrls: TStringList;
BodyText, TitleText: string;
procedure Init;
procedure PageAnalysis(SrcHTML, SrcUrl: string);
{ Persistence of the de-duplication pool; both return false on error. }
function LoadFromFile(FileName: string): boolean;
function SaveToFile(FileName: string): boolean;
destructor Destroy; override;
end;
implementation
function TCrawler.Hash(S: string): longint;
{ Bucket index for the LinkPool de-duplication table: the sum of all
  character ordinals of S, reduced modulo PoolSize. }
var
Index, Sum: longint;
begin
Sum := 0;
for Index := 1 to Length(S) do
Sum := Sum + Ord(S[Index]);
Result := Sum mod PoolSize;
end;
function TCrawler.GetPlainText(S: string): string;
{ Reduces an HTML fragment to plain text: strips existing line breaks,
  re-inserts them after block-level closing tags, removes
  <script>/<style> sections and every remaining tag, decodes entities
  via the SourceTags/DestTags tables and collapses runs of whitespace.
  The removal passes work in place: kept text is shifted left with Move
  and the string truncated afterwards. }
var
i, j, k, l, LengthOfS, t1, t2, y1, y2: longint;
Cr, Lf, Tab: Char;
Flags, HasSpace: boolean;
LowerS, NewS: string;
begin
Cr := Chr(13); Lf := Chr(10); Tab := Chr(9);
{ Pass 1: drop every CR/LF by in-place compaction (i = read cursor,
  k = write cursor). }
k := 1; i := 1; LengthOfS := Length(S); NewS := S;
while i <= LengthOfS do begin
Flags := false;
while (NewS[i] = Cr) or (NewS[i] = Lf) do begin
Inc(i); Flags := i > LengthOfS;
if Flags then break;
end;
if not Flags then NewS[k] := NewS[i] else Dec(k);
Inc(i); Inc(k);
end;
setlength(NewS, k - 1);
{Clear enters in page}
{ Pass 2: closing block tags become CRLF so paragraph breaks survive
  the tag stripping below. }
NewS := StringReplace(NewS, '</p>', Chr(13) + Chr(10), [rfReplaceAll, rfIgnoreCase]);
NewS := StringReplace(NewS, '<br>', Chr(13) + Chr(10), [rfReplaceAll, rfIgnoreCase]);
NewS := StringReplace(NewS, '</div>', Chr(13) + Chr(10), [rfReplaceAll, rfIgnoreCase]);
{ Pass 3: delete <script>...</script> blocks.  Each iteration copies the
  text between one '</script>' and the next '<script' down to write
  cursor i.  NOTE(review): an unclosed '<script' discards everything
  after it, and a '</script>' with no opener would Move to NewS[0] -
  confirm inputs are always well-formed HTML. }
LowerS := LowerCase(NewS); LengthOfS := Length(NewS);
k := NaiveMatch(LowerS, '<script', 1, LengthOfS, 7); i := k;
l := NaiveMatch(LowerS, '</script>', k + 7, LengthOfS, 9);
while l > 0 do begin
l := l + 9;
k := NaiveMatch(LowerS, '<script', l, LengthOfS, 7);
if k = 0 then k := LengthOfS + 1;
Move(NewS[l], NewS[i], k - l);
i := i + k - l;
l := NaiveMatch(LowerS, '</script>', k + 7, LengthOfS, 9);
end;
if i > 0 then setlength(NewS, i - 1);
{Clearup scripts}
{ Pass 4: same in-place compaction for <style>...</style> blocks. }
LowerS := LowerCase(NewS); LengthOfS := Length(NewS);
k := NaiveMatch(LowerS, '<style', 1, LengthOfS, 6); i := k;
l := NaiveMatch(LowerS, '</style>', k + 6, LengthOfS, 8);
while l > 0 do begin
l := l + 8;
k := NaiveMatch(LowerS, '<style', l, LengthOfS, 6);
if k = 0 then k := LengthOfS + 1;
Move(NewS[l], NewS[i], k - l);
i := i + k - l;
l := NaiveMatch(LowerS, '</style>', k + 6, LengthOfS, 8);
end;
if i > 0 then setlength(NewS, i - 1);
{Clearup style code}
{ Pass 5: strip every remaining <...> tag.  The repeat loop advances l
  past any '>' that sits inside an odd number of '"' or single quotes
  between k and l, so '>' inside attribute values does not end a tag. }
LowerS := LowerCase(NewS); LengthOfS := Length(NewS);
k := NaiveMatch(LowerS, '<', 1, LengthOfS, 1); i := k;
l := NaiveMatch(LowerS, '>', k + 1, LengthOfS, 1);
while l > 0 do begin
repeat
t1 := 0; t2 := 0;
for j := k to l do begin
if LowerS[j] = '"' then Inc(t1);
if LowerS[j] = '''' then Inc(t2);
end;
y1 := t1 mod 2; y2 := t2 mod 2;
if (y1 > 0) or (y2 > 0) then
l := NaiveMatch(LowerS, '>', l + 1, LengthOfS, 1);
until (l = 0) or ((y1 = 0) and (y2 = 0));
if l = 0 then break;
{ignore the > in "..." or '....'}
l := l + 1;
k := NaiveMatch(LowerS, '<', l, LengthOfS, 1);
if k = 0 then k := LengthOfS + 1;
Move(NewS[l], NewS[i], k - l);
i := i + k - l;
l := NaiveMatch(LowerS, '>', k + 1, LengthOfS, 1);
end;
if i > 0 then setlength(NewS, i - 1);
{Clear control code in <>}
{ Decode HTML entities (with and without trailing semicolon) using the
  parallel SourceTags/DestTags tables. }
for i := 0 to 9 do begin
NewS := StringReplace(NewS, SourceTags1[i], DestTags[i], [rfReplaceAll, rfIgnoreCase]);
NewS := StringReplace(NewS, SourceTags2[i], DestTags[i], [rfReplaceAll, rfIgnoreCase]);
end;
{replace marks}
{ NOTE(review): in the scraped copy this replaces a space with a space
  (a no-op); the original presumably replaced a non-breaking-space
  character here - confirm against the original source. }
NewS := StringReplace(NewS, ' ', ' ', [rfReplaceAll]);
{ Tabs become spaces, then runs of spaces collapse to a single space
  (same read/write-cursor compaction as pass 1). }
LengthOfS := Length(NewS);
for i := 1 to LengthOfS do if NewS[i] = Tab then NewS[i] := ' ';
k := 1; i := 1; LengthOfS := Length(NewS);
while i <= LengthOfS do begin
Flags := false; HasSpace := false;
while (NewS[i] = ' ') do begin
Inc(i); Flags := i > LengthOfS;
HasSpace := true;
if Flags then break;
end;
if HasSpace then Dec(i);
if not Flags then NewS[k] := NewS[i] else Dec(k);
Inc(i); Inc(k);
end;
setlength(NewS, k - 1);
{ Remove a single space left at a line start or line end. }
NewS := StringReplace(NewS, Lf + ' ', Lf, [rfReplaceAll]);
NewS := StringReplace(NewS, ' ' + Cr, Cr, [rfReplaceAll]);
{trim spaces and enters}
Result := Trim(NewS);
end;
function TCrawler.NaiveMatch(S, T: string; Start, LengthOfS, LengthOfT: longint): longint;
{ Naive substring search: returns the 1-based position of the first
  occurrence of T in S at or after Start, or 0 when there is none.
  Fix: the scan previously ran i up to LengthOfS, so a partial match
  starting near the end of S compared characters past the end of the
  string (S[k] with k > LengthOfS) - a range-check error with checking
  on and an undefined read without it.  The scan now stops at the last
  position where T can still fit, and a Start below 1 is clamped so no
  caller can index position 0. }
var
i, j: longint;
Success: boolean;
begin
Result := 0;
if Start < 1 then Start := 1;
for i := Start to LengthOfS - LengthOfT + 1 do begin
Success := true;
for j := 1 to LengthOfT do
if S[i + j - 1] <> T[j] then begin
Success := false;
break;
end;
if Success then begin
Result := i;
Exit;
end;
end;
end;
procedure TCrawler.Init;
{ Allocates the public result lists and every hash bucket of the URL
  de-duplication pool.  Buckets are kept sorted so IndexOf lookups in
  PageAnalysis stay fast. }
var
Bucket: longint;
begin
ImageUrls := TStringList.Create;
LinkUrls := TStringList.Create;
for Bucket := 0 to PoolSize - 1 do begin
LinkPool[Bucket] := TStringList.Create;
LinkPool[Bucket].Sorted := true;
end;
end;
procedure TCrawler.PageAnalysis(SrcHTML, SrcUrl: string);
{ Analyses one fetched page: fills LinkUrls and ImageUrls with absolute,
  de-duplicated URLs taken from '<a href=' and '<img src=' attributes,
  and sets TitleText / BodyText to the plain text of the <title> and
  <body> sections.  SrcUrl supplies the scheme, host and base path used
  to absolutise relative links. }
var
i, j, k, l, LengthOfHTML, HashCode: longint;
StrQuot, StrSpace, StrTriangle, StrQuot2, StrNewline, StrCross: longint;
RootUrl, HostName, LowerHTML, SubUrl, DestUrl, Header: string;
begin
ImageUrls.Clear; LinkUrls.Clear;
{ Split SrcUrl into scheme (Header), base path (RootUrl, trailing
  slashes removed) and host name (up to the first '/'). }
RootUrl := SrcUrl; Header := 'http://';
if LowerCase(Copy(RootUrl, 1, 6)) = 'ftp://' then begin
Delete(RootUrl, 1, 6);
Header := 'ftp://';
end;
if LowerCase(Copy(RootUrl, 1, 7)) = 'http://' then Delete(RootUrl, 1, 7);
if LowerCase(Copy(RootUrl, 1, 8)) = 'https://' then begin
Delete(RootUrl, 1, 8);
Header := 'https://';
end;
{ NOTE(review): if SrcUrl is empty or scheme-only, RootUrl is '' here
  and the loop condition indexes an empty string - confirm callers
  always pass a URL with a host part. }
while RootUrl[Length(RootUrl)] = '/' do begin
Delete(RootUrl, Length(RootUrl), 1);
if RootUrl = '' then break;
end;
if RootUrl = '' then Exit;
k := Pos('/', RootUrl);
if k > 0 then HostName := Copy(RootUrl, 1, k - 1) else HostName := RootUrl;
LengthOfHTML := Length(SrcHTML);
LowerHTML := LowerCase(SrcHTML);
{Parsing Links}
{ For each '<a href=', the URL runs to the nearest of: single quote,
  '>', space, '#' (this also crops fragment identifiers), double quote
  or newline.  l keeps that delimiter position and becomes the start of
  the next search. }
k := NaiveMatch(LowerHTML, '<a href=', 1, LengthOfHTML, 8);
while k > 0 do begin
k := k + 8; l := maxlongint;
StrQuot := NaiveMatch(LowerHTML, Chr(39), k + 1, LengthOfHTML, 1);
if (StrQuot < l) and (StrQuot > 0) then l := StrQuot;
StrTriangle := NaiveMatch(LowerHTML, '>', k, LengthOfHTML, 1);
if (StrTriangle < l) and (StrTriangle > 0) then l := StrTriangle;
StrSpace := NaiveMatch(LowerHTML, ' ', k, LengthOfHTML, 1);
if (StrSpace < l) and (StrSpace > 0) then l := StrSpace;
StrCross := NaiveMatch(LowerHTML, '#', k, LengthOfHTML, 1);
if (StrCross < l) and (StrCross > 0) then l := StrCross;
StrQuot2 := NaiveMatch(LowerHTML, '"', k + 1, LengthOfHTML, 1);
if (StrQuot2 < l) and (StrQuot2 > 0) then l := StrQuot2;
StrNewline := NaiveMatch(LowerHTML, Chr(10), k, LengthOfHTML, 1);
if (StrNewline < l) and (StrNewline > 0) then l := StrNewline;
if l < maxlongint then begin
{ Strip leading quotes, then absolutise: '/...' is host-relative,
  anything without a scheme is resolved against the base path. }
SubUrl := TrimRight(Copy(SrcHTML, k, l - k));
if SubUrl <> '' then begin
while SubUrl[1] = '"' do begin
Delete(SubUrl, 1, 1);
if SubUrl = '' then break;
end;
if SubUrl <> '' then begin
while SubUrl[1] = Chr(39) do begin
Delete(SubUrl, 1, 1);
if SubUrl = '' then break;
end;
if SubUrl <> '' then begin
if ('ftp://' = LowerCase(Copy(SubUrl, 1, 6))) or
('http://' = LowerCase(Copy(SubUrl, 1, 7))) or
('https://' = LowerCase(Copy(SubUrl, 1, 8))) then
DestUrl := SubUrl
else begin
if SubUrl[1] = '/' then
DestUrl := Header + HostName + SubUrl
else
DestUrl := Header + RootUrl + '/' + SubUrl;
end;
{ The hashed pool de-duplicates URLs across the whole crawl. }
HashCode := Hash(DestUrl);
if LinkPool[HashCode].IndexOf(DestUrl) = -1 then begin
LinkUrls.Add(DestUrl);
LinkPool[HashCode].Add(DestUrl);
{ Links that end like image files are also recorded as images. }
if (LowerCase(Copy(DestUrl, Length(DestUrl) - 3, 4)) = '.jpg') or
(LowerCase(Copy(DestUrl, Length(DestUrl) - 3, 4)) = '.bmp') or
(LowerCase(Copy(DestUrl, Length(DestUrl) - 4, 5)) = '.jpeg') then begin
ImageUrls.Add(DestUrl);
end;
end;
end;
end;
end;
k := NaiveMatch(LowerHTML, '<a href=', l, LengthOfHTML, 8);
end else break;
end;
{Parsing Image Links}
{ Same scan for '<img src='; note '#' is not a delimiter here, and only
  .jpg/.bmp/.jpeg sources are collected. }
k := NaiveMatch(LowerHTML, '<img src=', 1, LengthOfHTML, 9);
while k > 0 do begin
k := k + 9; l := maxlongint;
StrQuot := NaiveMatch(LowerHTML, Chr(39), k + 1, LengthOfHTML, 1);
if (StrQuot < l) and (StrQuot > 0) then l := StrQuot;
StrTriangle := NaiveMatch(LowerHTML, '>', k, LengthOfHTML, 1);
if (StrTriangle < l) and (StrTriangle > 0) then l := StrTriangle;
StrSpace := NaiveMatch(LowerHTML, ' ', k, LengthOfHTML, 1);
if (StrSpace < l) and (StrSpace > 0) then l := StrSpace;
StrQuot2 := NaiveMatch(LowerHTML, '"', k + 1, LengthOfHTML, 1);
if (StrQuot2 < l) and (StrQuot2 > 0) then l := StrQuot2;
StrNewline := NaiveMatch(LowerHTML, Chr(10), k, LengthOfHTML, 1);
if (StrNewline < l) and (StrNewline > 0) then l := StrNewline;
if l < maxlongint then begin
SubUrl := TrimRight(Copy(SrcHTML, k, l - k));
if SubUrl <> '' then begin
while SubUrl[1] = '"' do begin
Delete(SubUrl, 1, 1);
if SubUrl = '' then break;
end;
if SubUrl <> '' then begin
while SubUrl[1] = Chr(39) do begin
Delete(SubUrl, 1, 1);
if SubUrl = '' then break;
end;
if SubUrl <> '' then begin
if (LowerCase(Copy(SubUrl, Length(SubUrl) - 3, 4)) = '.jpg') or
(LowerCase(Copy(SubUrl, Length(SubUrl) - 3, 4)) = '.bmp') or
(LowerCase(Copy(SubUrl, Length(SubUrl) - 4, 5)) = '.jpeg') then begin
if ('ftp://' = LowerCase(Copy(SubUrl, 1, 6))) or
('http://' = LowerCase(Copy(SubUrl, 1, 7))) or
('https://' = LowerCase(Copy(SubUrl, 1, 8))) then
DestUrl := SubUrl
else begin
if SubUrl[1] = '/' then
DestUrl := Header + HostName + SubUrl
else
DestUrl := Header + RootUrl + '/' + SubUrl;
end;
HashCode := Hash(DestUrl);
if LinkPool[HashCode].IndexOf(DestUrl) = -1 then begin
ImageUrls.Add(DestUrl);
LinkPool[HashCode].Add(DestUrl);
end;
end;
end;
end;
end;
k := NaiveMatch(LowerHTML, '<img src=', l, LengthOfHTML, 9);
end else break;
end;
{Get Title Text}
TitleText := '';
k := NaiveMatch(LowerHTML, '<title>', 1, LengthOfHTML, 7);
if k > 0 then begin
k := k + 7;
l := NaiveMatch(LowerHTML, '</title>', k, LengthOfHTML, 8);
if l > 0 then
TitleText := Copy(SrcHTML, k, l - k);
end;
TitleText := GetPlainText(TitleText);
{Get Body Text}
{ NOTE(review): when '</body>' is missing, l = LengthOfHTML and the
  Copy below drops the final character of the document - confirm
  intended. }
BodyText := '';
k := NaiveMatch(LowerHTML, '<body', 1, LengthOfHTML, 5);
if k > 0 then begin
k := NaiveMatch(LowerHTML, '>', k + 5, LengthOfHTML, 1);
if k > 0 then begin
k := k + 1;
l := NaiveMatch(LowerHTML, '</body>', k, LengthOfHTML, 7);
if l = 0 then l := LengthOfHTML;
BodyText := Copy(SrcHTML, k, l - k);
end;
end;
BodyText := GetPlainText(BodyText);
end;
function TCrawler.LoadFromFile(FileName: string): boolean;
{ Restores the de-duplication pool written by SaveToFile: for each of
  the PoolSize buckets, a count line followed by that many URL lines.
  Returns true on success, false on any I/O or format error.
  Fixes: the original reassigned the global Input file, silently
  redirecting every plain ReadLn in the program to this file; it leaked
  the file handle when an exception fired (CloseFile sat inside the try
  with no finally); and it appended to buckets that already held
  entries, duplicating URLs when called twice. }
var
i, j, n: longint;
s: string;
F: TextFile;
begin
Result := false;
try
AssignFile(F, FileName);
Reset(F);
try
for i := 0 to PoolSize - 1 do begin
{ Reset each bucket so a reload replaces rather than accumulates. }
LinkPool[i].Clear;
ReadLn(F, n);
for j := 0 to n - 1 do begin
ReadLn(F, s);
LinkPool[i].Add(s);
end;
end;
Result := true;
finally
CloseFile(F);
end;
except
Result := false;
end;
end;
function TCrawler.SaveToFile(FileName: string): boolean;
{ Persists the de-duplication pool: for each bucket, its entry count
  followed by the entries themselves (the format LoadFromFile expects).
  Returns true on success, false on any I/O error.
  Fixes: the original reassigned the global Output file, so every plain
  WriteLn elsewhere in the program would have been redirected into this
  file, and the handle leaked when an exception fired after Rewrite
  (CloseFile sat inside the try with no finally). }
var
i, j: longint;
F: TextFile;
begin
Result := false;
try
AssignFile(F, FileName);
Rewrite(F);
try
for i := 0 to PoolSize - 1 do begin
WriteLn(F, LinkPool[i].Count);
for j := 0 to LinkPool[i].Count - 1 do
WriteLn(F, LinkPool[i].Strings[j]);
end;
Result := true;
finally
CloseFile(F);
end;
except
Result := false;
end;
end;
destructor TCrawler.Destroy;
{ Releases the result lists and every hash bucket allocated by Init,
  then runs the inherited destructor. }
var
Bucket: longint;
begin
LinkUrls.Free;
ImageUrls.Free;
for Bucket := 0 to PoolSize - 1 do
LinkPool[Bucket].Free;
inherited Destroy;
end;
end.
- 网页爬虫的一个类模块。
- 一个简单的网页爬虫
- 一个简单的JAVA网页爬虫
- 一个简单的JAVA网页爬虫
- 用java写了一个非常简单的网页爬虫
- 黑马程序员 分享一个牛叉的网页爬虫
- java实现网页爬虫的一个小例子
- 小爬虫-一个网页上的.jpg图片下载下来
- 一个简单的网络爬虫---爬取网页中的图片
- java开发一个简单的网页爬虫训练正则表达式
- 用cURL 制作一个简单的网页爬虫
- 爬虫实例:爬取一个网页上的图片地址
- 爬虫的网页
- python 爬虫(二)一个带下载进度的网页抓取小爬虫
- 制作一个简单的网页爬虫爬取一张网页的特定图片
- Jsoup解析网页(二)爬虫解析一个完整的网页
- Python爬虫----网页下载器和urllib2模块及对应的实例
- Python 网络爬虫 005 (编程) 如何编写一个可以 下载(或叫:爬取)一个网页 的网络爬虫
- UML中关联关系和依赖关系的区别
- IP欺骗原理精解和防范技术
- 长期发短信会降低智商 甚至比吸大麻还严重
- SQL Server 2005 Beta 2 Transact-SQL 增强功能
- 在ASP中集合(内置对象)的使用时遵守OO规则
- 网页爬虫的一个类模块。
- 2006新的开始~
- 带有层次关系的css
- jsp中javabean的应用(转载)
- 使用JavaBean进行数据库的连接(转载)
- javaBean连接mysql中的问题
- 学习xhtml的一点教训
- 2k5 全球手机销售数量及 2k5Q4五大手机制造商 市场份额排名
- 蘑菇管理