Simple Web Spider in Python
Revision as of 18:05, 12 January 2011 by Michael Murtaugh (talk | contribs) (→Opening a network connection with urllib2)
Opening a network connection with urllib2
import urllib2
request = urllib2.Request("http://www.volkskrant.nl/")
f=urllib2.urlopen(request)
print f.geturl()
print f.info()
print f.read()
Some sites require that you set the "User-Agent" header before they will serve the page.
# Some servers reject requests from the default Python User-Agent;
# impersonate a desktop browser before opening the connection (Python 2).
import urllib2

request = urllib2.Request("http://www.volkskrant.nl/")
# Browser-like User-Agent string so the server serves the normal page.
request.add_header("User-Agent", "Mozilla/5.0 (X11; U; Linux x86_64; fr; rv:1.9.1.5) Gecko/20091109 Ubuntu/9.10 (karmic) Firefox/3.5.5")
f = urllib2.urlopen(request)
f.close()  # fix: release the connection (original left it open)
Get the URLs of all images on a page
import urllib2, urlparse, html5lib, lxml
from lxml.cssselect import CSSSelector
request = urllib2.Request("http://www.volkskrant.nl/")
request.add_header("User-Agent", "Mozilla/5.0 (X11; U; Linux x86_64; fr; rv:1.9.1.5) Gecko/20091109 Ubuntu/9.10 (karmic) Firefox/3.5.5")
f=urllib2.urlopen(request)
# f.geturl(), f.info()
parser = html5lib.HTMLParser(tree=html5lib.treebuilders.getTreeBuilder("lxml"), namespaceHTMLElements=False)
page = parser.parse(f)
for elt in CSSSelector('img[src]')(page):
href = urlparse.urljoin(f.geturl(), elt.attrib['src'])
print href