Skip to content

Instantly share code, notes, and snippets.

@carlosf
Forked from Kaorw/History-11bc708b/entries.json
Created August 20, 2017 23:28
Show Gist options
  • Save carlosf/274808ceff6cdafaefcd4c43b75655b1 to your computer and use it in GitHub Desktop.
Save carlosf/274808ceff6cdafaefcd4c43b75655b1 to your computer and use it in GitHub Desktop.

Revisions

  1. @Kaorw Kaorw revised this gist Dec 28, 2013. 1 changed file with 1 addition and 1 deletion.
    2 changes: 1 addition & 1 deletion tweet_dumper.py
    Original file line number Diff line number Diff line change
    @@ -1,5 +1,5 @@
    # Grap multiple user's user_timeline from twitter API and save to Excel

    # Code will be save user's tweet ID, created Time, Coordinates-x, Coordinates-y, source, text. Can be modified at line 48 and so on
    # Original code from https://gist.github.com/yanofsky/5436496 "A script to download all of a user's tweets into a csv"

    import xlsxwriter
  2. @Kaorw Kaorw revised this gist Dec 28, 2013. 1 changed file with 16 additions and 11 deletions.
    27 changes: 16 additions & 11 deletions tweet_dumper.py
    Original file line number Diff line number Diff line change
    @@ -1,12 +1,16 @@
    #Grap multiple user's user_timeline from twitter API and save to Excel
    # Grap multiple user's user_timeline from twitter API and save to Excel

    # Original code from https://gist.github.com/yanofsky/5436496 "A script to download all of a user's tweets into a csv"

    import xlsxwriter
    import tweepy
    import tweepy

    #https://github.com/tweepy/tweepy

    consumer_key = " "
    consumer_secret = " "
    access_key = " "
    access_secret = " "
    consumer_key = "Your_consumer_key"
    consumer_secret = "Your_consumer_secret"
    access_key = "Your_access_key"
    access_secret = "Your_access_secret"

    def get_all_tweets(screen_name):

    @@ -108,9 +112,10 @@ def write_worksheet(twitter_name):
    row += 1
    col = 0

    workbook = xlsxwriter.Workbook('test2.xlsx')
    write_worksheet(' ')
    write_worksheet(' ')
    write_worksheet(' ')
    write_worksheet(' ')
    workbook = xlsxwriter.Workbook('Twitter_timeline.xlsx')


    write_worksheet('twitterID1')
    write_worksheet('twitterID2')

    workbook.close()
  3. @Kaorw Kaorw revised this gist Nov 22, 2013. 1 changed file with 108 additions and 55 deletions.
    163 changes: 108 additions & 55 deletions tweet_dumper.py
    Original file line number Diff line number Diff line change
    @@ -1,63 +1,116 @@
    #!/usr/bin/env python
    # encoding: utf-8
    #Grap multiple user's user_timeline from twitter API and save to Excel

    import tweepy #https://github.com/tweepy/tweepy
    import csv

    #Twitter API credentials
    consumer_key = ""
    consumer_secret = ""
    access_key = ""
    access_secret = ""
    import xlsxwriter
    import tweepy

    consumer_key = " "
    consumer_secret = " "
    access_key = " "
    access_secret = " "

    def get_all_tweets(screen_name):
    #Twitter only allows access to a users most recent 3240 tweets with this method

    #authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    #initialize a list to hold all the tweepy Tweets
    alltweets = []

    #make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name = screen_name,count=200)

    #save most recent tweets
    alltweets.extend(new_tweets)


    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    alltweets = []
    new_tweets = []
    outtweets = []

    new_tweets = api.user_timeline(screen_name = screen_name,count=200)

    alltweets.extend(new_tweets)

    #save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    #keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
    print "getting tweets before %s" % (oldest)

    #all subsiquent requests use the max_id param to prevent duplicates
    new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)

    #save most recent tweets
    alltweets.extend(new_tweets)

    #update the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    print "...%s tweets downloaded so far" % (len(alltweets))

    #transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text.encode("utf-8")] for tweet in alltweets]

    #write the csv
    with open('%s_tweets.csv' % screen_name, 'wb') as f:
    writer = csv.writer(f)
    writer.writerow(["id","created_at","text"])
    writer.writerows(outtweets)
    oldest = alltweets[-1].id - 1

    #keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
    print "getting tweets before %s" % (oldest)

    #all subsiquent requests use the max_id param to prevent duplicates
    new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)

    #save most recent tweets
    alltweets.extend(new_tweets)

    #update the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    print "...%s tweets downloaded so far" % (len(alltweets))

    #transform the tweepy tweets into a 2D array
    outtweets = [[tweet.id_str, tweet.created_at, tweet.coordinates,tweet.geo,tweet.source,tweet.text] for tweet in alltweets]

    return outtweets

    def write_worksheet(twitter_name):

    #formating for excel
    format01 = workbook.add_format()
    format02 = workbook.add_format()
    format03 = workbook.add_format()
    format04 = workbook.add_format()
    format01.set_align('center')
    format01.set_align('vcenter')
    format02.set_align('center')
    format02.set_align('vcenter')
    format03.set_align('center')
    format03.set_align('vcenter')
    format03.set_bold()
    format04.set_align('vcenter')
    format04.set_text_wrap()

    out1 = []
    header = ["id","created_at","coordinates-x","coordinates-y","source","text"]

    worksheet = workbook.add_worksheet(twitter_name)

    out1 = get_all_tweets(twitter_name)
    row = 0
    col = 0

    worksheet.set_column('A:A', 20)
    worksheet.set_column('B:B', 18)
    worksheet.set_column('C:C', 13)
    worksheet.set_column('D:D', 13)
    worksheet.set_column('E:E', 20)
    worksheet.set_column('F:F', 120)

    for h_item in header:
    worksheet.write(row, col, h_item, format03)
    col = col + 1

    row += 1
    col = 0

    pass
    for o_item in out1:
    write = []
    cord1 = 0
    cord2 = 0
    write = [o_item[0], o_item[1], o_item[4], o_item[5]]

    if o_item[2]:
    cord1 = o_item[2]['coordinates'][0]
    cord2 = o_item[2]['coordinates'][1]
    else:
    cord1 = ""
    cord2 = ""

    format01.set_num_format('yyyy/mm/dd hh:mm:ss')
    worksheet.write(row, 0, write[0], format02)
    worksheet.write(row, 1, write[1], format01)
    worksheet.write(row, 2, cord1, format02)
    worksheet.write(row, 3, cord2, format02)
    worksheet.write(row, 4, write[2], format02)
    worksheet.write(row, 5, write[3], format04)
    row += 1
    col = 0

    if __name__ == '__main__':
    #pass in the username of the account you want to download
    get_all_tweets("J_tsar")
    workbook = xlsxwriter.Workbook('test2.xlsx')
    write_worksheet(' ')
    write_worksheet(' ')
    write_worksheet(' ')
    write_worksheet(' ')
    workbook.close()
  4. @yanofsky yanofsky revised this gist Nov 1, 2013. 1 changed file with 1 addition and 1 deletion.
    2 changes: 1 addition & 1 deletion tweet_dumper.py
    Original file line number Diff line number Diff line change
    @@ -33,7 +33,7 @@ def get_all_tweets(screen_name):

    #keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
    print "getting tweets gefore %s" % (oldest)
    print "getting tweets before %s" % (oldest)

    #all subsiquent requests use the max_id param to prevent duplicates
    new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)
  5. @yanofsky yanofsky revised this gist Jun 7, 2013. 1 changed file with 1 addition and 0 deletions.
    1 change: 1 addition & 0 deletions tweet_dumper.py
    Original file line number Diff line number Diff line change
    @@ -12,6 +12,7 @@


    def get_all_tweets(screen_name):
    #Twitter only allows access to a users most recent 3240 tweets with this method

    #authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
  6. @yanofsky yanofsky revised this gist Apr 22, 2013. 1 changed file with 1 addition and 1 deletion.
    2 changes: 1 addition & 1 deletion tweet_dumper.py
    Original file line number Diff line number Diff line change
    @@ -1,7 +1,7 @@
    #!/usr/bin/env python
    # encoding: utf-8

    import tweepy
    import tweepy #https://github.com/tweepy/tweepy
    import csv

    #Twitter API credentials
  7. @yanofsky yanofsky created this gist Apr 22, 2013.
    62 changes: 62 additions & 0 deletions tweet_dumper.py
    Original file line number Diff line number Diff line change
    @@ -0,0 +1,62 @@
    #!/usr/bin/env python
    # encoding: utf-8

    import tweepy
    import csv

    #Twitter API credentials
    consumer_key = ""
    consumer_secret = ""
    access_key = ""
    access_secret = ""


    def get_all_tweets(screen_name):

    #authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    #initialize a list to hold all the tweepy Tweets
    alltweets = []

    #make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name = screen_name,count=200)

    #save most recent tweets
    alltweets.extend(new_tweets)

    #save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    #keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
    print "getting tweets gefore %s" % (oldest)

    #all subsiquent requests use the max_id param to prevent duplicates
    new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)

    #save most recent tweets
    alltweets.extend(new_tweets)

    #update the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    print "...%s tweets downloaded so far" % (len(alltweets))

    #transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text.encode("utf-8")] for tweet in alltweets]

    #write the csv
    with open('%s_tweets.csv' % screen_name, 'wb') as f:
    writer = csv.writer(f)
    writer.writerow(["id","created_at","text"])
    writer.writerows(outtweets)

    pass


    if __name__ == '__main__':
    #pass in the username of the account you want to download
    get_all_tweets("J_tsar")