import asyncio, ipaddress, re, signal, socket, traceback, typing
import urllib.error, urllib.parse
import json as _json

import bs4, netifaces, requests
import tornado.httpclient

from src import utils

REGEX_URL = re.compile("https?://[A-Z0-9{}]+".format(re.escape("-._~:/%?#[]@!$&'()*+,;=")), re.I)
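
# Illustrative match (the example string is a placeholder, not from the module):
#   REGEX_URL.search("see https://example.com/page for details").group(0)
#   -> "https://example.com/page"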

# best-effort tidying up of URLs
def url_sanitise(url: str):
    if not urllib.parse.urlparse(url).scheme:
        url = "http://%s" % url

    if url.endswith(")"):
        # trim ")" from the end only if there's not a "(" to match it
        # google.com/) -> google.com/
        # google.com/() -> google.com/()
        # google.com/()) -> google.com/()

        if "(" in url:
            open_index = url.rfind("(")
            other_index = url.rfind(")", 0, len(url)-1)
            if other_index == -1 or other_index < open_index:
                return url
        return url[:-1]
    return url
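
# Illustrative behaviour (inputs are placeholders, a sketch rather than a spec):
#   url_sanitise("google.com/page)")                -> "http://google.com/page"
#   url_sanitise("en.wikipedia.org/wiki/Foo_(bar)") -> "http://en.wikipedia.org/wiki/Foo_(bar)"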

USER_AGENT = ("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36")

RESPONSE_MAX = (1024*1024)*100
SOUP_CONTENT_TYPES = ["text/html", "text/xml", "application/xml"]

class HTTPException(Exception):
    pass

class HTTPTimeoutException(HTTPException):
    def __init__(self):
        Exception.__init__(self, "HTTP request timed out")

class HTTPParsingException(HTTPException):
    def __init__(self, message: str=None):
        Exception.__init__(self, message or "HTTP parsing failed")

class HTTPWrongContentTypeException(HTTPException):
    def __init__(self, message: str=None):
        Exception.__init__(self,
            message or "HTTP request gave wrong content type")

def throw_timeout():
    raise HTTPTimeoutException()

class Response(object):
    def __init__(self, code: int, data: typing.Any,
            headers: typing.Dict[str, str]):
        self.code = code
        self.data = data
        self.headers = headers

def request(url: str, method: str="GET", get_params: dict={},
        post_data: typing.Any=None, headers: dict={},
        json_data: typing.Any=None, code: bool=False, json: bool=False,
        soup: bool=False, parser: str="lxml", fallback_encoding: str="utf8",
        allow_redirects: bool=True, check_content_type: bool=True
        ) -> Response:

    if not urllib.parse.urlparse(url).scheme:
        url = "http://%s" % url

    if not "Accept-Language" in headers:
        headers["Accept-Language"] = "en-GB"
    if not "User-Agent" in headers:
        headers["User-Agent"] = USER_AGENT

    signal.signal(signal.SIGALRM, lambda _1, _2: throw_timeout())
    signal.alarm(5)
    try:
        response = requests.request(
            method.upper(),
            url,
            headers=headers,
            params=get_params,
            data=post_data,
            json=json_data,
            allow_redirects=allow_redirects,
            stream=True
        )
        response_content = response.raw.read(RESPONSE_MAX, decode_content=True)
    except TimeoutError:
        raise HTTPTimeoutException()
    finally:
        signal.signal(signal.SIGALRM, signal.SIG_IGN)

    response_headers = utils.CaseInsensitiveDict(dict(response.headers))
    content_type = response.headers.get("Content-Type", "").split(";", 1)[0]

    def _decode_data():
        return response_content.decode(response.encoding or fallback_encoding)

    if soup:
        if not check_content_type or content_type in SOUP_CONTENT_TYPES:
            soup = bs4.BeautifulSoup(_decode_data(), parser)
            return Response(response.status_code, soup, response_headers)
        else:
            raise HTTPWrongContentTypeException(
                "Tried to soup non-html/non-xml data (%s)" % content_type)

    data = _decode_data()
    if json and data:
        try:
            return Response(response.status_code, _json.loads(data),
                response_headers)
        except _json.decoder.JSONDecodeError as e:
            raise HTTPParsingException(str(e))

    return Response(response.status_code, data, response_headers)
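
# Illustrative usage (URLs and parameters are placeholders, not part of the
# module; shown as comments so importing this file stays side-effect free):
#   page = request("https://example.com", soup=True)
#   page.code, page.data.title      # status code, parsed <title> tag
#   api = request("https://example.com/api", get_params={"q": "term"}, json=True)
#   api.data                        # decoded JSON, or raw text if the body was empty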

def request_many(urls: typing.List[str]) -> typing.Dict[str, Response]:
    responses = {}

    async def _request(url):
        client = tornado.httpclient.AsyncHTTPClient()
        request = tornado.httpclient.HTTPRequest(url, method="GET",
            connect_timeout=2, request_timeout=2)

        response = await client.fetch(request)

        headers = utils.CaseInsensitiveDict(dict(response.headers))
        data = response.body.decode("utf8")
        responses[url] = Response(response.code, data, headers)

    loop = asyncio.new_event_loop()
    awaits = []
    for url in urls:
        awaits.append(_request(url))
    task = asyncio.wait(awaits, loop=loop, timeout=5)
    loop.run_until_complete(task)
    loop.close()

    return responses
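
# Illustrative usage (URLs are placeholders): requests run concurrently, and any
# URL that errors or misses the 5 second overall timeout is simply absent from
# the returned dict.
#   results = request_many(["https://example.com", "https://example.org"])
#   for url, response in results.items():
#       print(url, response.code)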

def strip_html(s: str) -> str:
    return bs4.BeautifulSoup(s, "lxml").get_text()
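
# e.g. strip_html("<p>hello <b>world</b></p>") -> "hello world"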

def resolve_hostname(hostname: str) -> typing.List[str]:
    try:
        addresses = socket.getaddrinfo(hostname, None, 0, socket.SOCK_STREAM)
    except:
        return []
    return [address[-1][0] for address in addresses]
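
# Illustrative: resolve_hostname("localhost") typically returns something like
# ["127.0.0.1", "::1"], though the exact result depends on the local resolver;
# unresolvable names give [].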

def is_ip(addr: str) -> bool:
    try:
        ipaddress.ip_address(addr)
    except ValueError:
        return False
    return True
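
# e.g. is_ip("127.0.0.1") -> True, is_ip("::1") -> True, is_ip("example.com") -> False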

def is_localhost(hostname: str) -> bool:
    if is_ip(hostname):
        ips = [ipaddress.ip_address(hostname)]
    else:
        ips = [ipaddress.ip_address(ip) for ip in resolve_hostname(hostname)]

    for interface in netifaces.interfaces():
        links = netifaces.ifaddresses(interface)

        for link in links.get(netifaces.AF_INET, []
                )+links.get(netifaces.AF_INET6, []):
            address = ipaddress.ip_address(link["addr"].split("%", 1)[0])
            if address in ips:
                return True

    return False
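
# Illustrative: on a typical machine with a loopback interface,
# is_localhost("127.0.0.1") -> True and is_localhost("example.com") -> False,
# though the result always depends on the host's own interface addresses.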