import asyncio, codecs, ipaddress, re, signal, socket, traceback, typing
import urllib.error, urllib.parse, uuid
import json as _json
import bs4, netifaces, requests, tornado.httpclient

from src import IRCBot, utils

REGEX_URL = re.compile(r"https?://\S+", re.I)
PAIRED_CHARACTERS = ["<>", "()"]
# best-effort tidying up of URLs
def url_sanitise(url: str):
    if not urllib.parse.urlparse(url).scheme:
        url = "http://%s" % url

    for pair_start, pair_end in PAIRED_CHARACTERS:
        # trim ")" from the end only if there's not a "(" to match it
        # google.com/) -> google.com/
        # google.com/() -> google.com/()
        # google.com/()) -> google.com/()
        if url.endswith(pair_end):
            if pair_start in url:
                open_index = url.rfind(pair_start)
                other_index = url.rfind(pair_end, 0, len(url)-1)
                # the last pair_start is already closed by an earlier
                # pair_end, so the trailing pair_end is unmatched
                if not other_index == -1 and other_index > open_index:
                    url = url[:-1]
            else:
                url = url[:-1]
    return url
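
# illustrative behaviour (example URLs only):
#   url_sanitise("example.com/page)") -> "http://example.com/page"
#   url_sanitise("en.wikipedia.org/wiki/Bot_(disambiguation)")
#       -> "http://en.wikipedia.org/wiki/Bot_(disambiguation)"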

USERAGENT = "Mozilla/5.0 (compatible; BitBot/%s; +%s)" % (
    IRCBot.VERSION, IRCBot.URL)

# refuse to read more than this many bytes of any response body (100MiB)
RESPONSE_MAX = (1024*1024)*100
SOUP_CONTENT_TYPES = ["text/html", "text/xml", "application/xml"]
DECODE_CONTENT_TYPES = ["text/plain"]+SOUP_CONTENT_TYPES
UTF8_CONTENT_TYPES = ["application/json"]

class HTTPException(Exception):
    pass
class HTTPTimeoutException(HTTPException):
    def __init__(self):
        Exception.__init__(self, "HTTP request timed out")
class HTTPParsingException(HTTPException):
    def __init__(self, message: str, data: str):
        Exception.__init__(self,
            "%s\n%s" % ((message or "HTTP parsing failed"), data))
class HTTPWrongContentTypeException(HTTPException):
    def __init__(self, message: str=None):
        Exception.__init__(self,
            message or "HTTP request gave wrong content type")

def throw_timeout():
    raise HTTPTimeoutException()

class Request(object):
    def __init__(self, url: str,
            get_params: typing.Dict[str, str]={}, post_data: typing.Any=None,
            headers: typing.Dict[str, str]={},

            json: bool=False, json_body: bool=False, allow_redirects: bool=True,
            check_content_type: bool=True, parse: bool=False,
            detect_encoding: bool=True,

            method: str="GET", parser: str="lxml", id: str=None,
            fallback_encoding: str=None, content_type: str=None,
            proxy: str=None, useragent: str=None,

            **kwargs):
        self.id = id or str(uuid.uuid4())

        self.set_url(url)
        self.method = method.upper()
        # copy so the kwargs update below never mutates the shared default {}
        self.get_params = dict(get_params)
        self.post_data = post_data
        self.headers = headers

        self.json = json
        self.json_body = json_body
        self.allow_redirects = allow_redirects
        self.check_content_type = check_content_type
        self.parse = parse
        self.detect_encoding = detect_encoding

        self.parser = parser
        self.fallback_encoding = fallback_encoding
        self.content_type = content_type
        self.proxy = proxy
        self.useragent = useragent

        # any remaining keyword arguments become POST data or GET parameters
        if kwargs:
            if method == "POST":
                self.post_data = kwargs
            else:
                self.get_params.update(kwargs)

    def set_url(self, url: str):
        parts = urllib.parse.urlparse(url)
        if not parts.scheme:
            parts = urllib.parse.urlparse("http://%s" % url)

        # IDNA-encode the hostname so non-ASCII domains survive the request
        netloc = codecs.encode(parts.netloc, "idna").decode("ascii")
        params = "" if not parts.params else (";%s" % parts.params)
        query = "" if not parts.query else ("?%s" % parts.query)
        fragment = "" if not parts.fragment else ("#%s" % parts.fragment)

        self.url = (
            f"{parts.scheme}://{netloc}{parts.path}{params}{query}{fragment}")

    def get_headers(self) -> typing.Dict[str, str]:
        headers = self.headers.copy()
        if not "Accept-Language" in headers:
            headers["Accept-Language"] = "en-GB"
        if not "User-Agent" in headers:
            headers["User-Agent"] = self.useragent or USERAGENT
        if not "Content-Type" in headers and self.content_type:
            headers["Content-Type"] = self.content_type
        return headers

    def get_body(self) -> typing.Any:
        if not self.post_data == None:
            if self.content_type == "application/json" or self.json_body:
                return _json.dumps(self.post_data)
            else:
                return self.post_data
        else:
            return None
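
# illustrative construction (all values below are examples, not defaults):
#   req = Request("https://example.com/api", json=True,
#       get_params={"q": "term"}, useragent="MyBot/1.0")
#   req.get_headers()["User-Agent"] -> "MyBot/1.0"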

class Response(object):
    def __init__(self, code: int, data: typing.Any,
            headers: typing.Dict[str, str], encoding: str):
        self.code = code
        self.data = data
        self.headers = headers
        # strip any "; charset=..." suffix from the Content-Type header
        self.content_type = headers.get("Content-Type", "").split(";", 1)[0]
        self.encoding = encoding

def _meta_content(s: str) -> typing.Dict[str, str]:
    out = {}
    for keyvalue in s.split(";"):
        key, _, value = keyvalue.strip().partition("=")
        out[key] = value
    return out

def _find_encoding(soup: bs4.BeautifulSoup) -> typing.Optional[str]:
    # a declared <meta charset="..."> takes priority
    if not soup.meta == None:
        meta_charset = soup.meta.get("charset")
        if not meta_charset == None:
            return meta_charset

    # fall back to <meta http-equiv="Content-Type" content="...; charset=...">
    meta_content_type = soup.findAll("meta",
        {"http-equiv": lambda v: (v or "").lower() == "content-type"})
    if meta_content_type:
        return _meta_content(meta_content_type[0].get("content"))["charset"]

    # assume UTF-8 when the document has a bare HTML5 doctype
    doctype = [item for item in soup.contents if isinstance(item,
        bs4.Doctype)] or None
    if doctype and doctype[0] == "html":
        return "utf8"

    return None
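
# illustrative charset sniffing (example document only):
#   soup = bs4.BeautifulSoup('<meta charset="utf-8"><p>hi</p>', "lxml")
#   _find_encoding(soup) -> "utf-8"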

def request(request_obj: typing.Union[str, Request], **kwargs) -> Response:
    # accept either a prebuilt Request or a plain URL plus keyword arguments
    if isinstance(request_obj, str):
        request_obj = Request(request_obj, **kwargs)
    return _request(request_obj)
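
# illustrative usage (example URLs; shown as comments because these perform
# real network requests):
#   page = request("https://example.com", parse=True)     # BeautifulSoup in .data
#   data = request("https://example.com/api", json=True)  # decoded JSON in .data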

def _request(request_obj: Request) -> Response:
    def _wrap() -> Response:
        headers = request_obj.get_headers()
        response = requests.request(
            request_obj.method,
            request_obj.url,
            headers=headers,
            params=request_obj.get_params,
            data=request_obj.get_body(),
            allow_redirects=request_obj.allow_redirects,
            stream=True
        )
        response_content = response.raw.read(RESPONSE_MAX,
            decode_content=True)
        # if there is anything left to read, the body exceeded RESPONSE_MAX
        if not response.raw.read(1) == b"":
            raise ValueError("Response too large")

        headers = utils.CaseInsensitiveDict(dict(response.headers))
        our_response = Response(response.status_code, response_content,
            headers=headers, encoding=response.encoding)
        return our_response

    try:
        response = utils.deadline_process(_wrap, seconds=5)
    except utils.DeadlineExceededException:
        raise HTTPTimeoutException()

    encoding = response.encoding or request_obj.fallback_encoding

    if not encoding:
        if response.content_type in UTF8_CONTENT_TYPES:
            encoding = "utf8"
        else:
            encoding = "iso-8859-1"

    # let an in-document <meta> charset override the transport-level encoding
    if (request_obj.detect_encoding and
            response.content_type and
            response.content_type in SOUP_CONTENT_TYPES):
        souped = bs4.BeautifulSoup(response.data, request_obj.parser)
        encoding = _find_encoding(souped) or encoding

    def _decode_data():
        return response.data.decode(encoding)

    if request_obj.parse:
        if (not request_obj.check_content_type or
                response.content_type in SOUP_CONTENT_TYPES):
            souped = bs4.BeautifulSoup(_decode_data(), request_obj.parser)
            response.data = souped
            return response
        else:
            raise HTTPWrongContentTypeException(
                "Tried to soup non-html/non-xml data (%s)" %
                response.content_type)

    if request_obj.json and response.data:
        data = _decode_data()
        try:
            response.data = _json.loads(data)
            return response
        except _json.decoder.JSONDecodeError as e:
            raise HTTPParsingException(str(e), data)

    if response.content_type in DECODE_CONTENT_TYPES:
        response.data = _decode_data()
    return response

class RequestManyException(Exception):
    pass
def request_many(requests: typing.List[Request]) -> typing.Dict[str, Response]:
    responses = {}

    async def _request(request):
        client = tornado.httpclient.AsyncHTTPClient()
        url = request.url
        if request.get_params:
            url = "%s?%s" % (url, urllib.parse.urlencode(request.get_params))

        t_request = tornado.httpclient.HTTPRequest(
            # use the param-encoded URL, not the bare request.url
            url,
            connect_timeout=2, request_timeout=2,
            method=request.method,
            body=request.get_body(),
            headers=request.get_headers(),
            follow_redirects=request.allow_redirects,
        )

        try:
            response = await client.fetch(t_request)
        except:
            raise RequestManyException(
                "request_many failed for %s" % url)

        headers = utils.CaseInsensitiveDict(dict(response.headers))
        data = response.body.decode("utf8")
        responses[request.id] = Response(response.code, data, headers, "utf8")

    loop = asyncio.new_event_loop()
    awaits = []
    for request in requests:
        awaits.append(_request(request))
    # run all fetches concurrently with an overall 5 second timeout
    task = asyncio.wait(awaits, loop=loop, timeout=5)
    loop.run_until_complete(task)
    loop.close()

    return responses
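
# illustrative usage (example URLs; responses are keyed by each Request's id):
#   reqs = [Request("https://example.com", id="a"),
#       Request("https://example.org", id="b")]
#   request_many(reqs) -> {"a": <Response>, "b": <Response>}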

class Client(object):
    request = request
    request_many = request_many

def strip_html(s: str) -> str:
    return bs4.BeautifulSoup(s, "lxml").get_text()
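
# e.g. strip_html("<b>hello</b> world") -> "hello world"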

def resolve_hostname(hostname: str) -> typing.List[str]:
    try:
        addresses = socket.getaddrinfo(hostname, None, 0, socket.SOCK_STREAM)
    except:
        return []
    return [address[-1][0] for address in addresses]

def is_ip(addr: str) -> bool:
    try:
        ipaddress.ip_address(addr)
    except ValueError:
        return False
    return True

def host_permitted(hostname: str) -> bool:
    if is_ip(hostname):
        ips = [ipaddress.ip_address(hostname)]
    else:
        ips = [ipaddress.ip_address(ip) for ip in resolve_hostname(hostname)]

    # refuse hostnames that resolve to one of our own interface addresses
    for interface in netifaces.interfaces():
        links = netifaces.ifaddresses(interface)

        for link in links.get(netifaces.AF_INET, []
                )+links.get(netifaces.AF_INET6, []):
            address = ipaddress.ip_address(link["addr"].split("%", 1)[0])
            if address in ips:
                return False

    # refuse loopback, link-local, multicast and private ranges
    for ip in ips:
        if ip.version == 6 and ip.ipv4_mapped:
            ip = ip.ipv4_mapped

        if (ip.is_loopback or
                ip.is_link_local or
                ip.is_multicast or
                ip.is_private):
            return False

    return True
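
# illustrative usage as a guard before fetching user-supplied URLs
# (hostnames are examples; results depend on the local interfaces):
#   host_permitted("localhost")   -> False  (loopback)
#   host_permitted("example.com") -> True   (public address)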