User:Lidia.Pereira/SDRII/SG: Difference between revisions
No edit summary |
No edit summary |
||
(4 intermediate revisions by the same user not shown) | |||
Line 1: | Line 1: | ||
Following my [[User:Lidia.Pereira/RWRMII/EY | essay]] I was quite interested in the idea of how implicit participation makes use of design choices to benefit from user generated content, thus contributing to information management systems which can be exploited for improving information retrieval/market research. In order for this process to be optimized, production of social relations becomes the new paradigm under post-industrialist market rule, for it is communication which produces economic value. Immaterial labor, then, both satisfies and produces a demand, ultimately forming subjectivity. | Following my [[User:Lidia.Pereira/RWRMII/EY | essay]] I was quite interested in the idea of how implicit participation makes use of design choices to benefit from user generated content, thus contributing to information management systems which can be exploited for improving information retrieval/market research. In order for this process to be optimized, production of social relations becomes the new paradigm under post-industrialist market rule, for it is communication which produces economic value. Immaterial labor, then, both satisfies and produces a demand, ultimately forming subjectivity. <br> | ||
Inspired by this idea, I'm experimenting with posting my browsing history on social media, which will ultimately start suggesting me people I might "like" to follow, pages I "might" like, subsequentely leading me to browse new links and so on and so forth. The idea is that, at some point, I'll stop having agency, playing the idea of a hetero-generative subjectivity to the extreme. | Inspired by this idea, I'm experimenting with posting my browsing history on social media, which will ultimately start suggesting me people I might "like" to follow, pages I "might" like, subsequentely leading me to browse new links and so on and so forth. The idea is that, at some point, I'll stop having agency, playing the idea of a hetero-generative subjectivity to the extreme. <br> | ||
The goal with this process (more than a project) is not to prove that my information is being tracked, which is now a more than well-known fact, but rather to explore the informational paths which inform subjectivity everyday, trying to analyze patterns and derive conclusions as to which factors contribute the most for this construction. <br> | |||
'''Mix Script (posts both on Facebook and Twitter) which also posts images on Facebook:'''<br> | |||
Seeing that Twitter's interface is not the best for posting pictures, I decided to only post them on Facebook. | |||
<syntaxhighlight lang="python"> | |||
import sqlite3, re, time, os | |||
import urllib2 | |||
from bs4 import BeautifulSoup | |||
import tweepy, facepy | |||
from facepy import GraphAPI | |||
from urlparse import urljoin | |||
consumer_key="aJiJkOEdd3YKiXVRyfnWDg" | |||
consumer_secret="8TDG4KqBLZPqcvjNFZb7qtJ7brSyqnuTNd1Y9yQrEM" | |||
access_token="2308513993-50VDXIyyKvmITBrGEyvzzVa00v6y3KZoihtf5xs" | |||
access_token_secret="53g8fdSl2URB4SY294XncUgAjGcmojWfRjacc3Hjuk2XD" | |||
auth = tweepy.OAuthHandler(consumer_key, consumer_secret) | |||
auth.set_access_token(access_token, access_token_secret) | |||
api = tweepy.API(auth) | |||
graph= GraphAPI("CAATZCEv2dRlABABZBEjBMq3SudDcSoP5MzZBrccwM7eDhnelmgELB3LBHJ17Fez31GC0YZCYFJparvjUXXZCDQb1p\ | |||
LFZCmSWA9yZCDOQY5MfUvvv9Xc2YgQ9OvEGazwutXYZAad1Po4tIdnuAOSR0ZBHVX3VIgOV1WBHAuxZB7Wi87Y11GEcuDKwiXVZCUbuQPOIHMZD") | |||
places = sqlite3.connect("places.sqlite") | |||
database = places.cursor() | |||
database.execute('''SELECT url FROM moz_places''') | |||
january = re.compile(r"http://\S+\w") | |||
lista = [] | |||
n = 0 | |||
#get the urls | |||
for data in database: | |||
urls = database.fetchall() | |||
for url in urls: | |||
february = str(url) | |||
march = january.findall(february) | |||
april = str(march) | |||
may = april.strip("['']") | |||
if may != "": | |||
lista.append(may) | |||
database.execute('''DELETE FROM moz_places WHERE url=?''',(url)) | |||
places.commit() | |||
for item in lista: | |||
try: | |||
june = urllib2.urlopen(item) | |||
july = june.read() | |||
august = BeautifulSoup(july) | |||
images = august.findAll("img") | |||
n = n + 1 | |||
if august.title != None: | |||
september = august.title.string | |||
if september: | |||
october = september.encode("utf-8") | |||
try: | |||
api.update_status(october) | |||
if images: | |||
imagetopost = None | |||
for image in images: | |||
imgsrc = image.get("src") | |||
if imgsrc: | |||
imgsrc = urljoin(item,imgsrc) | |||
tuesday = urllib2.urlopen(imgsrc) | |||
info = tuesday.info() #getting the MIME type of a page | |||
typ = info.gettype() | |||
wednesday = tuesday.read() | |||
if typ == "image/jpeg": | |||
print october | |||
print typ | |||
imagetopost = "file%04d.jpg" % n | |||
f = open(imagetopost,"wb") | |||
f.write(wednesday) | |||
f.close() | |||
break | |||
if imagetopost: | |||
graph.post(path ="me/photos", source= open(imagetopost), message=october) | |||
os.remove("file%04d.jpg" % n) | |||
else: | |||
graph.post(path="me/feed", message = october) | |||
time.sleep(3) | |||
except tweepy.error.TweepError or facepy.exceptions.FacebookError or httplib.BadStatusLine: | |||
print "Caca!" | |||
except urllib2.HTTPError: | |||
print "Bah!" | |||
</syntaxhighlight> | |||
'''Mix Script (posts both on Facebook and Twitter):''' | '''Mix Script (posts both on Facebook and Twitter):''' |
Latest revision as of 21:26, 29 March 2014
Following my essay I was quite interested in the idea of how implicit participation makes use of design choices to benefit from user generated content, thus contributing to information management systems which can be exploited for improving information retrieval/market research. In order for this process to be optimized, production of social relations becomes the new paradigm under post-industrialist market rule, for it is communication which produces economic value. Immaterial labor, then, both satisfies and produces a demand, ultimately forming subjectivity.
Inspired by this idea, I'm experimenting with posting my browsing history on social media, which will ultimately start suggesting me people I might "like" to follow, pages I "might" like, subsequently leading me to browse new links and so on and so forth. The idea is that, at some point, I'll stop having agency, playing the idea of a hetero-generative subjectivity to the extreme.
The goal with this process (more than a project) is not to prove that my information is being tracked, which is now a more than well-known fact, but rather to explore the informational paths which inform subjectivity everyday, trying to analyze patterns and derive conclusions as to which factors contribute the most for this construction.
Mix Script (posts both on Facebook and Twitter) which also posts images on Facebook:
Seeing that Twitter's interface is not the best for posting pictures, I decided to only post them on Facebook.
# ---------------------------------------------------------------------------
# Mix script: reads URLs out of a Firefox history database (places.sqlite),
# tweets each page's <title>, and mirrors the post on Facebook, attaching
# the first image/jpeg found on the page when there is one. Consumed rows
# are deleted from the history so the script can be re-run incrementally.
# Python 2 (urllib2 / urlparse / httplib).
# ---------------------------------------------------------------------------
import sqlite3, re, time, os
import urllib2
import httplib  # BUGFIX: BadStatusLine was referenced below but never imported
from bs4 import BeautifulSoup
import tweepy, facepy
from facepy import GraphAPI
from urlparse import urljoin

# SECURITY NOTE: these credentials are hard-coded and have been published;
# they must be revoked and should be loaded from the environment or an
# untracked config file instead.
consumer_key="aJiJkOEdd3YKiXVRyfnWDg"
consumer_secret="8TDG4KqBLZPqcvjNFZb7qtJ7brSyqnuTNd1Y9yQrEM"
access_token="2308513993-50VDXIyyKvmITBrGEyvzzVa00v6y3KZoihtf5xs"
access_token_secret="53g8fdSl2URB4SY294XncUgAjGcmojWfRjacc3Hjuk2XD"

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
graph = GraphAPI("CAATZCEv2dRlABABZBEjBMq3SudDcSoP5MzZBrccwM7eDhnelmgELB3LBHJ17Fez31GC0YZCYFJparvjUXXZCDQb1p"
                 "LFZCmSWA9yZCDOQY5MfUvvv9Xc2YgQ9OvEGazwutXYZAad1Po4tIdnuAOSR0ZBHVX3VIgOV1WBHAuxZB7Wi87Y11GEcuDKwiXVZCUbuQPOIHMZD")

places = sqlite3.connect("places.sqlite")
database = places.cursor()
database.execute('''SELECT url FROM moz_places''')

january = re.compile(r"http://\S+\w")  # extracts the http URL from the row tuple's repr
lista = []
n = 0

# Collect every URL from the history, deleting each consumed row.
# BUGFIX: the original wrapped fetchall() in "for data in database:", which
# consumed -- and silently dropped -- the first row of the history.
for url in database.fetchall():
    february = str(url)                # row tuple -> "(u'http://...',)"
    march = january.findall(february)
    april = str(march)
    may = april.strip("['']")
    if may != "":
        lista.append(may)
    database.execute('''DELETE FROM moz_places WHERE url=?''', url)
places.commit()

for item in lista:
    try:
        june = urllib2.urlopen(item)
        july = june.read()
        june.close()                   # BUGFIX: response was never closed
        august = BeautifulSoup(july)
        images = august.findAll("img")
        n = n + 1
        if august.title != None:
            september = august.title.string
            if september:
                october = september.encode("utf-8")
                try:
                    api.update_status(october)
                    # BUGFIX: reset per page. Previously this was only set
                    # inside "if images:", so a stale value from an earlier
                    # iteration pointed at an already-deleted file.
                    imagetopost = None
                    for image in images:
                        imgsrc = image.get("src")
                        if not imgsrc:
                            continue
                        imgsrc = urljoin(item, imgsrc)   # resolve relative src
                        tuesday = urllib2.urlopen(imgsrc)
                        typ = tuesday.info().gettype()   # MIME type of the fetched image
                        wednesday = tuesday.read()
                        tuesday.close()                  # BUGFIX: was never closed
                        if typ == "image/jpeg":
                            print(october)
                            print(typ)
                            imagetopost = "file%04d.jpg" % n
                            f = open(imagetopost, "wb")
                            f.write(wednesday)
                            f.close()
                            break
                    if imagetopost:
                        # BUGFIX: open in binary mode and close the handle
                        # after the upload (it leaked before).
                        photo = open(imagetopost, "rb")
                        try:
                            graph.post(path="me/photos", source=photo, message=october)
                        finally:
                            photo.close()
                        os.remove(imagetopost)
                    else:
                        # No JPEG found (or no images at all): plain status.
                        # BUGFIX: pages without any <img> previously skipped
                        # the Facebook post entirely.
                        graph.post(path="me/feed", message=october)
                    time.sleep(3)      # throttle so the APIs don't rate-limit us
                # BUGFIX: "except A or B or C:" evaluates to just A, so only
                # TweepError was ever caught; a tuple catches all three.
                except (tweepy.error.TweepError,
                        facepy.exceptions.FacebookError,
                        httplib.BadStatusLine):
                    print("Caca!")
    except urllib2.HTTPError:
        print("Bah!")
Mix Script (posts both on Facebook and Twitter):
# ---------------------------------------------------------------------------
# Mix script: reads URLs from the Firefox history (places.sqlite), posts each
# page's <title> to both Twitter and Facebook, and erases the rows it has
# consumed. Python 2 (urllib2 / httplib).
# ---------------------------------------------------------------------------
import sqlite3, re, time
import urllib2
import httplib  # BUGFIX: BadStatusLine was referenced below but never imported
from bs4 import BeautifulSoup
import tweepy, facepy
from facepy import GraphAPI

# SECURITY NOTE: these credentials are hard-coded and have been published;
# they must be revoked and should be loaded from the environment instead.
consumer_key="aJiJkOEdd3YKiXVRyfnWDg"
consumer_secret="8TDG4KqBLZPqcvjNFZb7qtJ7brSyqnuTNd1Y9yQrEM"
access_token="2308513993-50VDXIyyKvmITBrGEyvzzVa00v6y3KZoihtf5xs"
access_token_secret="53g8fdSl2URB4SY294XncUgAjGcmojWfRjacc3Hjuk2XD"

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
graph = GraphAPI("CAATZCEv2dRlABABZBEjBMq3SudDcSoP5MzZBrccwM7eDhnelmgELB3LBHJ17Fez31GC0YZCYFJparvjUXXZCD"
                 "Qb1pLFZCmSWA9yZCDOQY5MfUvvv9Xc2YgQ9OvEGazwutXYZAad1Po4tIdnuAOSR0ZBHVX3VIgOV1WBHAuxZB7Wi87Y11GEcuDKwiXVZCUbuQPOIHMZD")

places = sqlite3.connect("places.sqlite")
database = places.cursor()
database.execute('''SELECT url FROM moz_places''')
january = re.compile(r"http://\S+\w")  # extracts the http URL from the row tuple's repr
lista = []

# get the urls in the database, erase them once they're extracted
# BUGFIX: iterating the cursor while also calling fetchall() dropped the
# first row; fetch everything in one pass instead.
for url in database.fetchall():
    february = str(url)                # row tuple -> "(u'http://...',)"
    march = january.findall(february)
    april = str(march)
    may = april.strip("['']")
    if may != "":
        lista.append(may)
    database.execute('''DELETE FROM moz_places WHERE url=?''', url)
places.commit()

# open urls, use beautiful soup to extract the titles of the pages, update statuses
for item in lista:
    try:
        june = urllib2.urlopen(item)
        july = june.read()
        june.close()                   # BUGFIX: response was never closed
        august = BeautifulSoup(july)
        if august.title != None:
            september = august.title.string
            if september != None:
                october = september.encode("utf-8")
                try:
                    api.update_status(october)
                    graph.post(path="me/feed", message=october)
                    time.sleep(3)      # throttle so the APIs don't rate-limit us
                # BUGFIX: "except A or B or C:" evaluates to just A, so only
                # TweepError was ever caught; a tuple catches all three.
                except (tweepy.error.TweepError,
                        facepy.exceptions.FacebookError,
                        httplib.BadStatusLine):
                    print("Caca!")
    # BUGFIX: the outer try had no handler at all (a syntax error as written);
    # mirror the other scripts and skip pages that fail to load.
    except urllib2.HTTPError:
        print("Bah!")
First successful attempt with Twitter:
# ---------------------------------------------------------------------------
# First successful Twitter attempt: tweets the <title> of every page in the
# Firefox history that has not been tweeted before. status.txt accumulates
# every title posted so far and is used to skip duplicates. Python 2.
# ---------------------------------------------------------------------------
import sqlite3, re, time
import urllib2
from bs4 import BeautifulSoup  # BUGFIX: was "import bs4", but BeautifulSoup() is called unqualified below
import tweepy

# SECURITY NOTE: these credentials are hard-coded and have been published;
# they must be revoked and should be loaded from the environment instead.
consumer_key="aJiJkOEdd3YKiXVRyfnWDg"
consumer_secret="8TDG4KqBLZPqcvjNFZb7qtJ7brSyqnuTNd1Y9yQrEM"
access_token="2308513993-50VDXIyyKvmITBrGEyvzzVa00v6y3KZoihtf5xs"
access_token_secret="53g8fdSl2URB4SY294XncUgAjGcmojWfRjacc3Hjuk2XD"

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

places = sqlite3.connect("places.sqlite")
database = places.cursor()
database.execute('''SELECT url FROM moz_places''')
january = re.compile(r"http://\S+\w")  # extracts the http URL from the row tuple's repr
lista = []

# get the urls
# BUGFIX: "for data in database:" plus fetchone() advanced the cursor twice
# per pass, so every other row was silently skipped.
for url in database.fetchall():
    february = str(url)                # row tuple -> "(u'http://...',)"
    march = january.findall(february)
    april = str(march)
    may = april.strip("['']")
    if may != "":
        lista.append(may)

# Titles already tweeted in earlier runs.
status_file = open("status.txt")
status = status_file.read()
status_file.close()                    # BUGFIX: handle was never closed

octo = ""
for item in lista:
    try:
        june = urllib2.urlopen(item)
        july = june.read()
        june.close()                   # BUGFIX: response was never closed
        august = BeautifulSoup(july)
        if august.title != None:
            september = august.title.string
            if september != None:
                october = september.encode("utf-8")
                octo = octo + " " + october
                if october not in status:
                    try:
                        api.update_status(october)
                    except tweepy.error.TweepError:
                        print("Repetido!")
                    time.sleep(2)      # throttle so Twitter doesn't rate-limit us
    except urllib2.HTTPError:
        print("Bah!")

# BUGFIX: the log append used to run inside the loop, guarded by
# "item == lista[len(lista)-1]" -- which fires once per duplicate of the
# last URL -- and the handle leaked. Write once, after the loop.
f = open("status.txt", "a")
f.write(octo)
f.close()
First successful attempt with Facebook:
# ---------------------------------------------------------------------------
# First successful Facebook attempt: posts the <title> of every page in the
# Firefox history that has not been posted before to the Facebook feed.
# facestatus.txt accumulates every title posted so far and is used to skip
# duplicates. Python 2 (urllib2 / httplib).
# ---------------------------------------------------------------------------
import sqlite3, re, time
import urllib2
import httplib  # BUGFIX: BadStatusLine was referenced below but never imported
from bs4 import BeautifulSoup
import bs4
from facepy import GraphAPI
import facepy

# SECURITY NOTE: this access token is hard-coded and has been published;
# it must be revoked and should be loaded from the environment instead.
graph = GraphAPI("CAATZCEv2dRlABABZBEjBMq3SudDcSoP5MzZBrccwM7eDhnelmgELB3LBHJ17Fez31GC0YZCYFJparvjUXXZCDQb1"
                 "pLFZCmSWA9yZCDOQY5MfUvvv9Xc2YgQ9OvEGazwutXYZAad1Po4tIdnuAOSR0ZBHVX3VIgOV1WBHAuxZB7Wi87Y11GEcuDKwiXVZCUbuQPOIHMZD")

places = sqlite3.connect("places.sqlite")
database = places.cursor()
database.execute('''SELECT url FROM moz_places''')
january = re.compile(r"http://\S+\w")  # extracts the http URL from the row tuple's repr
lista = []

# get the urls
# BUGFIX: "for data in database:" plus fetchone() advanced the cursor twice
# per pass, so every other row was silently skipped.
for url in database.fetchall():
    february = str(url)                # row tuple -> "(u'http://...',)"
    march = january.findall(february)
    april = str(march)
    may = april.strip("['']")
    if may != "":
        lista.append(may)

# Titles already posted in earlier runs.
status_file = open("facestatus.txt")
status = status_file.read()
status_file.close()                    # BUGFIX: handle was never closed

octo = ""
for item in lista:
    try:
        june = urllib2.urlopen(item)
        july = june.read()
        june.close()                   # BUGFIX: response was never closed
        august = BeautifulSoup(july)
        if august.title != None:
            september = august.title.string
            if september != None:
                october = september.encode("utf-8")
                octo = octo + " " + october
                if october not in status:
                    try:
                        graph.post(path="me/feed", message=october)
                    except facepy.exceptions.FacebookError as error:
                        print("SAY WHA?")
                        # BUGFIX: the original printed the exception CLASS,
                        # not the actual error that occurred.
                        print(error)
                time.sleep(3)          # throttle so Facebook doesn't rate-limit us
    # BUGFIX: "except A or B:" evaluates to just A, so BadStatusLine was
    # never caught; a tuple catches both.
    except (urllib2.HTTPError, httplib.BadStatusLine):
        print("Bah!")

# BUGFIX: the log append used to run inside the loop, guarded by
# "item == lista[len(lista)-1]" -- which fires once per duplicate of the
# last URL -- and the handle leaked. Write once, after the loop.
f = open("facestatus.txt", "a")
f.write(octo)
f.close()