Python: Read URL

Install the required libraries. The urllib module ships with Python's standard library, so only requests and BeautifulSoup need to be installed:

 pip3 install requests
 pip3 install beautifulsoup4
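As a quick sanity check (a minimal sketch, not part of the original page), both packages can be imported and their versions printed; note that the beautifulsoup4 package is imported under the name bs4:

 import requests
 import bs4

 # both packages expose a __version__ attribute
 print("requests", requests.__version__)
 print("beautifulsoup4", bs4.__version__)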
==Example 1==
Fetch a page with requests and extract all of its text with BeautifulSoup:

 import requests
 from bs4 import BeautifulSoup

 # fetch the page and print the raw HTML
 link = "http://www.somesite.com/details.pl?urn=2344"
 f = requests.get(link)
 print(f.text)

 # parse the HTML and join every text node
 soup = BeautifulSoup(f.text, "html.parser")
 all_text = ''.join(soup.findAll(text=True))
 print(all_text)
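A slightly more defensive variant of Example 1 (a sketch; the timeout value and the error check are additions, and the URL is the same placeholder as above) raises on HTTP errors before parsing and uses get_text(), which collects the same text nodes as joining findAll(text=True):

 import requests
 from bs4 import BeautifulSoup

 link = "http://www.somesite.com/details.pl?urn=2344"
 resp = requests.get(link, timeout=10)   # timeout is an added assumption
 resp.raise_for_status()                 # raise an exception on 4xx/5xx responses

 soup = BeautifulSoup(resp.text, "html.parser")
 # get_text() gathers every text node in document order
 print(soup.get_text(separator="\n", strip=True))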
==Example 2==
Fetch an article page with urllib, remove the script and style elements, and print only the visible text:

 import urllib.request
 from bs4 import BeautifulSoup

 url = "https://www.industry.co.id/read/49900/dongkrak-penjualan-tam-hadirkan-toyota-driving-experience"
 with urllib.request.urlopen(url) as response:
     html = response.read()

 soup = BeautifulSoup(html, "html.parser")

 # kill all script and style elements
 for script in soup(["script", "style"]):
     script.extract()    # rip it out

 # get text
 text = soup.get_text()

 # break into lines and remove leading and trailing space on each
 lines = (line.strip() for line in text.splitlines())
 # break multi-headlines into a line each
 chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
 # drop blank lines
 text = '\n'.join(chunk for chunk in chunks if chunk)
 print(text)

==Interesting Links==

* [[Python]]