How to search tweepy for more than one string? - python

In Python 3 with tweepy, I have this script to do hashtag searches on Twitter:
import tweepy
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#test = api.get_user('some user')._json
#test
#The test worked
search_result = api.search('#maconhamedicinal' or '#cannabismedicinal')
search_result
[]
The result is an empty list. Does anyone know what the problem is?

The expression '#maconhamedicinal' or '#cannabismedicinal' evaluates to just the first string in Python, so only one hashtag is ever searched. Search each keyword separately and combine the results:
keywords = ['#maconhamedicinal', '#cannabismedicinal']
results = []
for key in keywords:
    search_results = api.search(q=key, count=100)
    results = results + search_results

for result in results:
    print(result.text)  # do whatever you want with each tweet
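Alternatively, Twitter's search syntax supports an OR operator inside a single query, so both hashtags can be requested in one call (a minimal sketch, assuming the tweepy 3.x API.search method used above):

search_result = api.search(q='#maconhamedicinal OR #cannabismedicinal', count=100)
for tweet in search_result:
    print(tweet.text)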

Related

How do I automate tweets after accessing data through an API?

I am coding a Twitter bot, but I am having challenges along the way.
The code gets data from an API and should tweet that data out daily. Unfortunately, I am having trouble getting the function on the last line to actually send the tweet.
import urllib.request
from pprint import pprint
import json
import datetime
import tweepy
import time
import os
import logging
def importVax():
    link = 'https://data.ontario.ca/api/3/action/datastore_search?resource_id=8a89caa9-511c-4568-af89-7f2174b4378c&limit=100'
    query = urllib.request.urlopen(link)
    query = json.loads(query.read())
    for r in query['result']['records']:
        date = datetime.datetime.strptime(r['report_date'][0:10], "%Y-%m-%d").date()
        previous_day_admin = r['previous_day_doses_administered']
        total_admin = r['total_doses_administered']
        total_complete = r['total_vaccinations_completed']
        if previous_day_admin == '':
            previous_day_admin = 0
        print(date, previous_day_admin, total_admin)

if __name__ == '__main__':
    importVax()
consumer_key = 'REDACTED'
consumer_secret ='REDACTED'
access_token = 'REDACTED'
access_token_secret = 'REDACTED'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
def daily_update(date, previous_day_admin, total_admin, total_complete):
    message = str(
        f'''
        [{date}]
        Doses Administered: {previous_day_admin}
        Total Completed Vaccinations: {total_complete}
        % Immune: {round(total_complete/14570000)}
        '''
    )
    return api.update_status(message)
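One way to make the tweet actually go out is to have importVax() return the values instead of only printing them, and then call daily_update() explicitly. This is a sketch rather than the original code; it assumes the last record in the API response is the most recent one:

def importVax():
    link = 'https://data.ontario.ca/api/3/action/datastore_search?resource_id=8a89caa9-511c-4568-af89-7f2174b4378c&limit=100'
    query = json.loads(urllib.request.urlopen(link).read())
    latest = query['result']['records'][-1]  # assumption: the newest record comes last
    date = datetime.datetime.strptime(latest['report_date'][0:10], "%Y-%m-%d").date()
    previous_day_admin = latest['previous_day_doses_administered'] or 0
    total_admin = latest['total_doses_administered']
    total_complete = latest['total_vaccinations_completed']
    return date, previous_day_admin, total_admin, total_complete

if __name__ == '__main__':
    values = importVax()
    daily_update(*values)  # nothing is posted unless daily_update() is actually called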

How can I schedule a tweepy script to run every hour?

This is a simple script I wrote to scrape data from Twitter using tweepy.
import tweepy
import csv
import pandas as pd
from datetime import date
####input your credentials here
consumer_key = '(Hidden)'
consumer_secret = '(Hidden)'
access_token = '(Hidden)'
access_token_secret = '(Hidden)'
today = date.today()
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth,wait_on_rate_limit=True)
csvFile = open('remotejob.csv', 'a')
#Use csv Writer
csvWriter = csv.writer(csvFile)
for tweet in tweepy.Cursor(api.search,q="#jobs #remote",count=5000,
lang="en",
since=today,tweet_mode = 'extended').items():
print (tweet.created_at, tweet.id)
csvWriter.writerow([tweet.created_at, tweet.full_text.encode('utf-8'), tweet.id, tweet.user.name.encode('utf-8'), tweet.user.screen_name.encode('utf-8'), tweet.user.statuses_count, tweet.retweet_count, tweet.favorite_count])
How can I schedule it to run every hour automatically?
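A minimal sketch of one way to do it with only the standard library: wrap the scraping code above in a function and sleep for an hour between runs (a cron job or Windows Task Scheduler entry that launches the script hourly would work just as well):

import time

def scrape_once():
    # move the Cursor loop and CSV writing shown above into this function
    pass

while True:
    scrape_once()
    time.sleep(60 * 60)  # wait one hour before the next run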

Tweets are not being stored in the CSV file. I have tried many methods; I am getting the tweets, but they are not being written to my workbook.

My code is:
import tweepy
import csv
import pandas as pd
import sys
#input your credentials here
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth,wait_on_rate_limit=True)
# Open/Create a file to append data
csvFile = open('amaravathi.csv', 'a')
#Use csv Writer
csvWriter = csv.writer(csvFile)
for tweet in tweepy.Cursor(api.search,q="#amaravathi",count=10,
lang="en").items():`
csvWriter.writerow([tweet.created_at, tweet.text.encode('utf-8')])
print(tweet.created_at,tweet.text)
csvFile.close()
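One likely cause (an assumption, since no traceback is shown) is that the rows sit in the write buffer and never reach disk if the script is interrupted before csvFile.close() runs, for example by hitting a rate limit or Ctrl-C. A sketch that flushes after every row and closes the file even if the loop raises:

with open('amaravathi.csv', 'a', newline='', encoding='utf-8') as csvFile:
    csvWriter = csv.writer(csvFile)
    for tweet in tweepy.Cursor(api.search, q="#amaravathi", count=10,
                               lang="en").items():
        csvWriter.writerow([tweet.created_at, tweet.text])
        csvFile.flush()  # push each row to disk immediately
        print(tweet.created_at, tweet.text)
# the with-block closes the file automatically, even on an exception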

How to get all tweets from a public account with BeautifulSoup?

I am trying to get all tweets from a given account, but I can get only the last 20 tweets. How can I get all the tweets that the user has posted?
Here is my code:
from bs4 import BeautifulSoup as bs
import urllib.request

# This function returns tweets from
# the given username's account as a list
def get_tweets(username):
    tweets = []
    URL = "https://twitter.com/" + username
    soup = bs(urllib.request.urlopen(URL), 'lxml')
    for li in soup.find_all("li", {"data-item-type": "tweet"}):
        text_p = li.find("p", class_="tweet-text")
        if text_p is not None:
            tweets.append(text_p.get_text())
    return tweets
In Tweepy, you can get the user's timeline like this:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
for status in tweepy.Cursor(api.user_timeline, username).items():
    print('status_id: {}, text: {}'.format(status.id, status.text.encode('utf-8')))
Notice that it uses tweepy.Cursor, which keeps paging through results until there are no more items. Keep in mind that the Twitter API only returns roughly the most recent 3,200 tweets of a user's timeline.
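As a sketch, here is a tweepy-based replacement for the get_tweets() function above; it assumes the API credentials are already defined and uses tweet_mode='extended' so the full text is returned:

def get_tweets(username):
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth, wait_on_rate_limit=True)
    tweets = []
    # Cursor keeps paging through the timeline until the API returns no more items
    for status in tweepy.Cursor(api.user_timeline, screen_name=username,
                                tweet_mode='extended').items():
        tweets.append(status.full_text)
    return tweets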

Python Twitter crawling (scraping) JSON error

I ran pip install json and pip install simplejson, but these errors still occur:
simplejson.scanner.JSONDecodeError: Unterminated string starting at:
line 1 column 65922 (char 65921)
tweepy.error.TweepError: Failed to parse JSON payload: Unterminated string starting at: line 1 column 65922 (char 65921)
What should I do?
import tweepy
import time
import os
import json
import simplejson
search_term = 'word1'
search_term2= 'word2'
search_term3='word3'
lat = "xxxx"
lon = "xxxx"
radius = "xxxx"
location = "%s,%s,%s" % (lat, lon, radius)
API_key = "xxxx"
API_secret = "xxxx"
Access_token = "xxxx"
Access_token_secret = "xxxx"
auth = tweepy.OAuthHandler(API_key, API_secret)
auth.set_access_token(Access_token, Access_token_secret)
api = tweepy.API(auth)
c = tweepy.Cursor(api.search,
                  q="{} OR {} OR {}".format(search_term, search_term2, search_term3),  # include all three terms in the query
                  rpp=1000,
                  geocode=location,
                  include_entities=True)
data = {}
i = 1
for tweet in c.items():
    data['text'] = tweet.text
    print(i, ":", data)
    i += 1
    time.sleep(1)
wfile = open(os.getcwd() + "/workk2.txt", mode='w')
data = {}
i = 0
for tweet in c.items():
    data['text'] = tweet.text
    wfile.write(data['text'] + '\n')
    i += 1
wfile.close()
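The "Failed to parse JSON payload" error means one of Twitter's responses came back truncated or malformed; installing json or simplejson does not fix it (json ships with the standard library). A sketch of one way to keep the crawl going, for tweepy 3.x, is to wrap the cursor and retry after a pause when a request fails; the helper name here is just for illustration:

def handled(items, wait=10):
    # Yield tweets one at a time, pausing and retrying when a request fails.
    while True:
        try:
            yield next(items)
        except StopIteration:
            return
        except tweepy.TweepError as e:
            print("request failed, retrying:", e)
            time.sleep(wait)

for tweet in handled(c.items()):
    print(tweet.text)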
