Robot.java
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.StringTokenizer;

public class Robot {
    /**************************************************************
     * Method purpose: checks whether the URL passed as its
     * parameter may be crawled, according to the host's
     * robots.txt file.
     *
     * Return value:
     * -> true if crawling is allowed (or no robots.txt exists)
     * -> false on a malformed URL or if crawling is disallowed
     **************************************************************/
    public boolean isCrawlingAllowed(URL url) {
        String strHost, strProtocol, strFile = "";
        final String robotsFile = "/robots.txt";
        final String DISALLOW = "Disallow:";
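        /*
         * Example of the robots.txt format this method parses
         * (illustrative, not taken from the repository):
         *
         *   User-agent: *
         *   Disallow: /private/
         *   Disallow: /tmp/
         *
         * Note: this simple parser ignores User-agent sections and
         * treats every Disallow line as applying to this crawler.
         */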
        try {
            // Build the URL for the host's robots.txt file
            strHost = url.getHost();
            strProtocol = url.getProtocol();
            URL nUrl = new URL(strProtocol, strHost, robotsFile);
            // Read the whole file into strFile, one line at a time;
            // try-with-resources closes the reader even if an error occurs
            try (BufferedReader in = new BufferedReader(new InputStreamReader(nUrl.openStream()))) {
                String inputLine;
                while ((inputLine = in.readLine()) != null)
                    strFile += "\n" + inputLine;
            }
        } catch (MalformedURLException e) {
            // The URL could not be formed: don't crawl
            return false;
        } catch (IOException e) {
            // No readable robots.txt, so crawling is assumed to be allowed
            return true;
        }
        // Scan for "Disallow:" fields and compare each against the URL's path
        String strURL = url.getFile();
        int index = 0;
        while ((index = strFile.indexOf(DISALLOW, index)) != -1) {
            index += DISALLOW.length();
            String strPath = strFile.substring(index);
            StringTokenizer st = new StringTokenizer(strPath);
            if (!st.hasMoreTokens())
                break;
            String strBadPath = st.nextToken();
            // If the URL's path starts with a disallowed path, crawling is forbidden
            if (strURL.startsWith(strBadPath))
                return false;
        }
        // No matching Disallow rule was found, so we are allowed to crawl the URL
        return true;
    }
}
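
Below is a minimal sketch of how a crawler might call this class. The RobotDemo class name and the sample URL are illustrative, not part of the repository.

import java.net.URL;

public class RobotDemo {
    public static void main(String[] args) throws Exception {
        Robot robot = new Robot();
        // Hypothetical page the crawler is about to fetch
        URL page = new URL("https://example.com/some/page.html");
        if (robot.isCrawlingAllowed(page)) {
            System.out.println("Allowed to crawl: " + page);
        } else {
            System.out.println("Blocked by robots.txt: " + page);
        }
    }
}

Note that the java.net.URL constructors used here and in Robot are deprecated as of Java 20; URI.create("...").toURL() is the usual replacement in newer code.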