
Feb 12 2011

Scraping blog URLs, checking them against the current archive + storing the new ones in a tab-separated file.
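
For reference, each request to the Google AJAX blog search API (long since retired) returns JSON with the results nested under responseData.results; the script pulls six fields from each result. A minimal sketch of a single request, assuming the endpoint still answers:

import urllib2
import json

# sketch: fetch one 'large' page (8 results) and list the post URLs
url = 'https://ajax.googleapis.com/ajax/services/search/blogs?v=1.0&q=myself&rsz=large'
data = json.load(urllib2.urlopen(url))
for r in data['responseData']['results']:
	# each result also carries title, publishedDate, blogUrl and author
	print r['postUrl']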

#!/usr/bin/python2.6

import urllib2
import json
from datetime import date
import os

#txt = '../cgiscrape/blogs'
txt = 'blogs'	# tab-separated archive of scraped entries

start=0	# result offset passed to the API
scrapedate=date.today()	# date stamp for this run's entries
entries=[]	# new entries found on this run
urllist=[]	# post URLs already in the archive

# create the archive on the first run; otherwise load the post
# URLs we already have (third tab-separated column of each line)
if not os.path.exists(txt):
	f = open(txt,'w')
	f.close()
else:
	f = open(txt,'r')
	data = f.read()
	f.close()
	if len(data)>0:
		urls = data.split('\n')
		for a in urls:
			line = a.split('\t')
			if len(line)>2:
				urllist.append(line[2])
c=0
# page through the results 8 at a time ('large' pages); the API
# caps the result set at 64, i.e. start runs 0, 8, ..., 56
while start<64:
	url = 'https://ajax.googleapis.com/ajax/services/search/blogs?v=1.0&q=myself&start='+str(start)+'&rsz=large'

	f = urllib2.urlopen(url)
	data = json.load(f)
	# responseData comes back null when the API reports an error
	if not data['responseData']:
		break
	# keep only posts whose URL is not in the archive yet
	for r in data['responseData']['results']:
		if r['postUrl'] not in urllist:
			entry = "%s\t%s\t%s\t%s\t%s\t%s" % (scrapedate, r['title'], r['postUrl'], r['publishedDate'], r['blogUrl'], r['author'])
			entry = entry.encode("utf-8")
			entries.append(entry)
			c = c+1
	start += 8

print 'added %s entries' % (c)

# append the new entries one per line; the trailing newline keeps
# the archive splittable on '\n' for the next run
if entries:
	f = open(txt,'a')
	f.write('\n'.join(entries) + '\n')
	f.close()
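
Reading the archive back is the reverse: split on newlines, then on tabs, in the column order the entries were written (scrape date, title, post URL, published date, blog URL, author). A small sketch, assuming the same 'blogs' file:

# sketch: print the scrape date and post URL of every archived entry
f = open('blogs','r')
for line in f.read().split('\n'):
	cols = line.split('\t')
	if len(cols) > 5:
		print '%s\t%s' % (cols[0], cols[2])
f.close()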