ScrapingHelper
Utility for web scraping.
How to use
class URL
- validator()
- unquote() / decode()
- enquote() / encode()
In [1]: from scrapinghelper import URL
In [2]: u = URL()
In [3]: u.validator('http://sample.om')
Out[3]: True
In [4]: u.validator('http://sample.')
Out[4]: False
In [5]: url = URL('http://www.example.com/sample?src=git&encode=jp')
In [6]: url.is_valid
Out[6]: True
In [7]: url.attrs
Out[7]:
{'url': 'http://www.example.com/sample?src=git&encode=jp',
 'is_valid': True,
 'scheme': 'http',
 'netloc': 'www.example.com',
 'username': None,
 'password': None,
 'hostname': 'www.example.com',
 'port': None,
 'path': '/sample',
 'params': '',
 'query': 'src=git&encode=jp',
 'fragment': '',
 'basename': 'sample'}
In [8]: url.query
Out[8]: 'src=git&encode=jp'
In [9]: url.get_query_val('src')
Out[9]: 'git'
In [10]: url.set_query_val('src', 'csv')
Out[10]: 'http://www.example.com/sample?src=csv&encode=jp'
In [11]: url
Out[11]: http://www.example.com/sample?src=git&encode=jp
In [12]: url.set_query_val('src', 'csv', update=True)
Out[12]: 'http://www.example.com/sample?src=csv&encode=jp'
In [13]: url
Out[13]: http://www.example.com/sample?src=csv&encode=jp
In [14]: url.get_root_address()
Out[14]: 'http://www.example.com'
In [15]: url.strip_query()
Out[15]: 'http://www.example.com/sample'
In [16]: url = URL('http://www.example.com/データ.txt')
In [17]: url.attrs
Out[17]:
{'url': 'http://www.example.com/%E3%83%87%E3%83%BC%E3%82%BF.txt',
 'is_valid': True,
 'scheme': 'http',
 'netloc': 'www.example.com',
 'username': None,
 'password': None,
 'hostname': 'www.example.com',
 'port': None,
 'path': '/%E3%83%87%E3%83%BC%E3%82%BF.txt',
 'params': '',
 'query': '',
 'fragment': '',
 'basename': 'データ.txt'}
In [18]: url = URL('https://ja.wikipedia.org/wiki/日本語')
In [19]: url
Out[19]: https://ja.wikipedia.org/wiki/%E6%97%A5%E6%9C%AC%E8%AA%9E
In [20]: url.unquote()
Out[20]: 'https://ja.wikipedia.org/wiki/日本語'
In [21]: url.decode()
Out[21]: 'https://ja.wikipedia.org/wiki/日本語'
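The enquote()/encode() direction from the method list above is not demonstrated in the session. Here is a minimal sketch, assuming enquote() is simply the inverse of unquote() and returns the percent-encoded form (the exact signature is not shown in this README):

from scrapinghelper import URL

url = URL('https://ja.wikipedia.org/wiki/日本語')

# unquote()/decode() return the human-readable form, as shown above.
print(url.unquote())  # https://ja.wikipedia.org/wiki/日本語

# Assumption: enquote()/encode() return the percent-encoded form.
print(url.enquote())  # https://ja.wikipedia.org/wiki/%E6%97%A5%E6%9C%AC%E8%AA%9E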
class Scraper
- get_random_user_agent()
- get_random_ipv4()
- get_random_ipv6()
- request()
- request_async()
- get_filename()
- get_links()
- get_texts()
- download_file()
In [1]: from scrapinghelper import Scraper
In [2]: sc = Scraper()
In [3]: sc.get_random_user_agent()
Out[3]: 'Mozilla/5.0 (CrKey armv7l 1.5.16041) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.0 Safari/537.36'
In [4]: sc.get_random_user_agent()
Out[4]: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/'
In [5]: sc.get_random_user_agent()
Out[5]: 'Mozilla/5.0 (CrKey armv7l 1.5.16041) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.0 Safari/537.36'
In [6]: sc.get_random_user_agent()
Out[6]: 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1'
In [7]: sc.get_random_user_agent()
Out[7]: 'Mozilla/5.0 (CrKey armv7l 1.5.16041) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.0 Safari/537.36'
In [8]: sc.get_random_user_agent()
Out[8]: 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'
In [9]: sc.get_random_user_agent()
Out[9]: 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1'
In [10]: sc.get_random_ipv4()
Out[10]: '121.162.233.190'
In [11]: sc.get_random_ipv4()
Out[11]: '178.172.98.169'
In [12]: sc.get_random_ipv6()
Out[12]: '3d18:cb77:5387:3ee9:1e60:d5f3:d987:283a'
In [13]: sc.get_random_ipv6()
Out[13]: 'cfc1:a00d:9013:37a0:ed94:5e92:7fe7:e356'
In [14]:
Request headers with a random user agent are created automatically.
In [1]: # %load examples/check_headers.py
...: import scrapinghelper as sch
...: from pprint import pprint
...:
...: scraper = sch.Scraper()
...: response = scraper.request('http://httpbin.org/headers')
...:
...: pprint(response.json())
{'headers': {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en',
'Host': 'httpbin.org',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (iPad; CPU OS 8_1_2 like Mac OS X) '
'AppleWebKit/600.1.4 (KHTML, like Gecko) '
'Mobile/12B440',
'X-Amzn-Trace-Id': 'Root=1-62de3626-07daf491262b96356486884d'}}
In [2]:
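request() returns a requests-html style response, so the parsed page is available on response.html (response.html.text is also used later in this README). The get_links() and get_texts() methods listed above are presumably convenience wrappers over the same data; since their exact signatures are not shown here, this sketch sticks to the response object:

from scrapinghelper import Scraper

scraper = Scraper()
response = scraper.request('https://example.com')

# requests-html exposes the parsed page on response.html:
print(response.html.links)  # set of links found on the page
print(response.html.text)   # visible text of the page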
In [2]: from scrapinghelper import URL, Scraper, LogConfig
...:
...: logconfig = LogConfig()
...: logconfig.level = 'INFO'
...: sc = Scraper(logconfig=logconfig)
...:
...: url = URL('https://www.houjin-bangou.nta.go.jp/download/zenken/#csv-unicode')
...: response = sc.request(url)
...:
...: content = response.content
...: print(f'code: {response.status_code}')
...:
code: 200
In [3]: from scrapinghelper import URL, Scraper, LogConfig
...:
...: logconfig = LogConfig()
...: logconfig.level = 'DEBUG'
...: sc = Scraper(logconfig=logconfig)
...:
...: url = URL('https://www.houjin-bangou.nta.go.jp/download/zenken/#csv-unicode')
...: response = sc.request(url)
...:
...: content = response.content
...: print(f'code: {response.status_code}')
2022-06-02T19:34:31.885790+0900 LOG configure: {'handlers': [{'sink': <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>, 'level': 'DEBUG', 'format': '<green>{time}</green> <level>{message}</level>', 'colorize': True, 'serialize': False}]}
2022-06-02T19:34:31.886414+0900 URL: https://www.houjin-bangou.nta.go.jp/download/zenken/#csv-unicode
2022-06-02T19:34:32.092599+0900 response status_code: 200
code: 200
In [4]: logconfig
Out[4]: LogConfig(sink=None, level=DEBUG, format=<green>{time}</green> <level>{message}</level>, colorize=True, serialize=False)
In [5]:
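request_async() from the method list above is not demonstrated either. A minimal sketch, assuming it is the awaitable counterpart of request() and accepts the same arguments (the exact signature is not shown in this README):

import asyncio
from scrapinghelper import Scraper

async def main():
    scraper = Scraper()
    # Assumption: request_async() mirrors request() but is awaitable.
    response = await scraper.request_async('https://httpbin.org/ip')
    print(response.html.text)

asyncio.run(main())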
render() and PROXY
If render=False is passed, request() skips calling render(). The render() of requests-html does not work with a proxy; scrapinghelper supports render() through a proxy.
In [2]: # %load check_ipaddress.py
...: from scrapinghelper import Scraper, URL
...:
...: # tiny socks5 proxy
...: proxies = {
...: 'http':'socks5://127.0.0.1:9050',
...: 'https':'socks5://127.0.0.1:9050'
...: }
...: url = 'https://httpbin.org/ip'
...:
...: scraper = Scraper()
...: response = scraper.request(url)
...: print(response.html.text)
...: response = scraper.request(url,proxies=proxies)
...: print(response.html.text)
...: response = scraper.request(url,proxies=proxies, render=False)
...: print(response.html.text)
...:
{ "origin": "221.186.103.38" }
{ "origin": "185.195.71.3" }
{ "origin": "185.195.71.3" }
In [3]:
PROXY
ProxyManager fetches a public proxy list from a URL; the default source is github.com/hookzof. Please keep in mind that these proxies come with ABSOLUTELY NO WARRANTY.
In [2]: # %load examples/get_proxy.py
...: from scrapinghelper import ProxyManager
...:
...: p = ProxyManager()
...: print(p.proxies[:2])
...:
...: print(p.next_proxy())
...: print(p.next_proxy())
...: print(p.get_random_proxy())
...:
...:
[{'http': socks5://85.221.247.236:8080, 'https': socks5://85.221.247.236:8080}, {'http': socks5://109.201.9.100:8080, 'https': socks5://109.201.9.100:8080}]
{'http': socks5://85.221.247.236:8080, 'https': socks5://85.221.247.236:8080}
{'http': socks5://109.201.9.100:8080, 'https': socks5://109.201.9.100:8080}
{'http': socks5://1.224.3.122:3888, 'https': socks5://1.224.3.122:3888}
In [3]:
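The mapping returned by next_proxy() has the same shape as the proxies dict passed to request() in the render example above, so the two can be combined. A minimal sketch, assuming request() accepts the mapping as-is:

from scrapinghelper import Scraper, ProxyManager

scraper = Scraper()
pm = ProxyManager()

# next_proxy() yields a requests-style proxies mapping (see the output
# above); pass it to request() just like the socks5 example earlier.
response = scraper.request('https://httpbin.org/ip', proxies=pm.next_proxy())
print(response.html.text)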
You can also pass a filename as the URL, e.g.:
p = ProxyManager('file://./myproxy_list.txt')
The URI is expanded as follows:
if proxies_url.startswith('file://.'):
    proxies_url = proxies_url.replace('file://.', '')
    this_directory = Path(__file__).parent
    proxies_url = f'file://{str(this_directory / proxies_url)}'
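A minimal usage sketch for a local list; myproxy_list.txt is a hypothetical file, assumed to use the same one-proxy-per-line format as the default public list:

from scrapinghelper import ProxyManager

# Hypothetical local file in the same format as the default list.
p = ProxyManager('file://./myproxy_list.txt')
print(p.next_proxy())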
CAUTION: If you use a free proxy to log in to something, or enter personal information and POST it, you should assume that it will be leaked. Keep in mind that it is like writing your credit card number and security code on a postcard.
KNOWN PROBLEM
If you want to use this module (and/or requests_html, selenium) on an Ubuntu VPS, you should try the following command:
sudo apt install -y gconf-service libasound2 libatk1.0-0 libc6 libcairo2 libcups2 libdbus-1-3 libexpat1 libfontconfig1 libgcc1 libgconf-2-4 libgdk-pixbuf2.0-0 libglib2.0-0 libgtk-3-0 libnspr4 libpango-1.0-0 libpangocairo-1.0-0 libstdc++6 libx11-6 libx11-xcb1 libxcb1 libxcomposite1 libxcursor1 libxdamage1 libxext6 libxfixes3 libxi6 libxrandr2 libxrender1 libxss1 libxtst6 ca-certificates fonts-liberation libappindicator1 libnss3 lsb-release xdg-utils wget