I am making a Telegram bot that can access a database to reply to users' queries. The bot needs to respond to requests for specific data in the database. I was able to solve the case where users request all data, but I am stuck on individual data. I am using telegram.ext from the telegram package in Python. Here is what I have done so far.
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import MySQLdb
currr = [] # global list var ~don't bash me for using global in python please, I'm a newbie
# request for all data in database
def request2(bot, update):
    db = MySQLdb.connect(host="localhost", user="root", passwd="pwd", db="mydb")
    cur = db.cursor()
    cur.execute("select ID from table")
    ID = cur.fetchall()
    cur.execute("SELECT ID, temp FROM table2 order by indexs desc")
    each_rows = cur.fetchall()
    for IDs in ID:
        for each_row in each_rows:
            if str(each_row[0])[0:4] == str(IDs)[2:6]:
                update.message.reply_text('reply all related data here')
                break
# request for single data
def individualreq(bot, update):
    db = MySQLdb.connect(host="localhost", user="root", passwd="pwd", db="mydb")
    update.message.reply_text('reply individual data to users here')
def main():
    updater = Updater("TOKEN")
    dp = updater.dispatcher
    global currr
    # get all IDs from database
    db = MySQLdb.connect(host="localhost", user="root", passwd="pwd", db="mydb")
    cur = db.cursor()
    cur.execute("select ID from table")
    curr_ID = cur.fetchall()
    # example ID = 'F01', 'F02', 'F03'
    for curr_IDs in curr_ID:
        currr.append(curr_IDs[0])
    # request all data
    dp.add_handler(CommandHandler("all", request2))
    # request individual data
    dp.add_handler(CommandHandler(currr, individualreq))  # currr is the list of commands
    # start the bot (omitted in my original snippet)
    updater.start_polling()
    updater.idle()
if __name__ == '__main__':
    main()
I am looking for a way to pass the current command, which is also the ID in the database that the user requested from the currr[] list, to the individualreq(bot, update) function so that only data for the called ID is replied. Users will select from a list of IDs in Telegram, and the command handler should pass the selected ID to the function. I have not found a way to pass the ID to the function. Could someone help me solve this, please? Thanks.
I found a solution to my question in the answer provided by Oluwafemi Sule. CommandHandler can pass the arguments of the command to the function by adding pass_args=True to the CommandHandler.
dp.add_handler(CommandHandler(currr, individualreq, pass_args=True))
To receive the args in the function, the callback needs an extra parameter.
def individualreq(bot, update, args):
    # update.message.text holds the full command, e.g. '/F01'
    id = update.message.text
    print(id[1:])  # [1:] strips the leading / from the command
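Note that args only holds text typed after the command, so the command name itself still comes from update.message.text. A minimal sketch of a lookup callback built on that, reusing the table2 schema from the question (the WHERE clause is my assumption about how IDs map to rows):
def individualreq(bot, update, args):
    requested_id = update.message.text[1:]  # '/F01' -> 'F01'
    db = MySQLdb.connect(host="localhost", user="root", passwd="pwd", db="mydb")
    cur = db.cursor()
    # assumed lookup: fetch rows whose ID starts with the requested command
    cur.execute("SELECT ID, temp FROM table2 WHERE ID LIKE %s", (requested_id + '%',))
    for row in cur.fetchall():
        update.message.reply_text(str(row))
    db.close()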
You can outright make individualreq a closure.
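For example, a minimal sketch of that closure approach (names follow the question's code), registering one handler per ID with the ID baked into the callback:
def make_individualreq(command_id):
    def individualreq(bot, update):
        # command_id is captured from the enclosing scope
        update.message.reply_text('reply data for %s here' % command_id)
    return individualreq

for command_id in currr:
    dp.add_handler(CommandHandler(command_id, make_individualreq(command_id)))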
CommandHandler takes a command or a list of commands to listen for, plus a number of other options.
There is a pass_user_data option that allows for user data to be passed to the callback.
dp.add_handler(CommandHandler(currr, individualreq, pass_user_data=True))
The signature for the individualreq callback is updated to take user_data:
def individualreq(bot, update, user_data=None):
    # user_data is a dict
    print(user_data)
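For illustration, a minimal sketch of how user_data could be used here; the 'last_id' key is hypothetical, and the dict persists per user across updates:
def individualreq(bot, update, user_data=None):
    # remember the last ID this user asked for (hypothetical key)
    user_data['last_id'] = update.message.text[1:]
    update.message.reply_text('You asked for %s' % user_data['last_id'])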
Related
Requirement: I want to create a Python API that inserts data into a BigQuery table. The API will be hosted in Swagger/Postman, and from there users can provide input data so that it gets reflected in the BigQuery table.
Can anyone help me find a suitable solution with code?
import sqlite3 as sql
from google.cloud import bigquery
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file('path/to/file.json')
project_id = 'project_id'
client = bigquery.Client(credentials=credentials, project=project_id)
def add_data(group_name, user_name):
    try:
        # Connecting to database
        con = sql.connect('shot_database.db')
        # Getting cursor
        c = con.cursor()
        # Adding data
        job_config.use_legacy_sql = True
        query_job = client.query("""
            INSERT INTO `table_name` (group, user)
            VALUES (%s, %s)""", job_config=job_config)
        results = query_job.result()  # Wait for the job to complete.
        # Applying changes
        con.commit()
    except:
        print("An error has occurred")
The code you provided is a mix of SQLite and BigQuery, but it looks like you're trying to use BigQuery to insert data into a table. To insert data into a BigQuery table using Python, you can use the insert_rows_json() method of the Client class. Here's an example of how you can use this method to insert data into a table called "mytable" in a dataset called "mydataset":
import json

# Define the data you want to insert
data = [
    {
        "group": group_name,
        "user": user_name
    }
]
# Insert the data
table_id = "mydataset.mytable"
errors = client.insert_rows_json(table_id, data)
if errors == []:
    print("Data inserted successfully")
else:
    print("Errors occurred while inserting data:")
    print(json.dumps(errors, indent=2))
Then you can create an API using Flask or Django and call the add_data method you defined to insert data into the BigQuery table.
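For instance, here is a minimal Flask sketch (the endpoint path and JSON field names are assumptions) that exposes the insert as a POST endpoint you could call from Postman or describe with Swagger; it reuses the BigQuery client defined above:
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/add', methods=['POST'])
def add():
    payload = request.get_json()
    rows = [{"group": payload["group_name"], "user": payload["user_name"]}]
    # insert_rows_json streams the rows into the table and returns any errors
    errors = client.insert_rows_json("mydataset.mytable", rows)
    if errors:
        return jsonify({"status": "error", "errors": errors}), 500
    return jsonify({"status": "ok"}), 200

if __name__ == '__main__':
    app.run(port=8080)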
Using discord.py, I'm trying to first query a database for some values that are displayed to the user as the default value within a slash command's argument (essentially allowing them to edit values).
Simplifying the problem: the slash command would be
/update [Age]
But I'd like to default the value for [Age] with a value queried from a database. The problem is how to query the database at the time the command is issued. For reference, this is contained in a cog.
import discord
from discord.ext import commands
from discord import app_commands
class Age(commands.Cog):
    def __init__(self, client: commands.Bot):
        self.client = client
        print("age.py LOADED!")
    @app_commands.command(
        name="age",
        description="Update your Age")
    # HERE IS WHERE I'D LIKE TO SHOW A DEFAULT NUMBER BASED ON A DB QUERY
    async def age(self, interaction: discord.Interaction, age: int):
        data = self.client.db
        data.reconnect()
        cursor = data.cursor()
        qry = (
            "update users set age = %s where d_uid = %s and d_gid = %s"
        )
        val = (age, str(interaction.user.id), str(interaction.guild_id))
        try:
            cursor.execute(qry, val)
            data.commit()
        except:
            await interaction.response.send_message(
                'ERROR! Something went wrong. Sorry about that. The Botmaster has been notified.'
            )
        else:
            await interaction.response.send_message(
                f'Age set to {age}!'
            )
        finally:
            cursor.close()
async def setup(client: commands.Bot) -> None:
    await client.add_cog(Age(client))
Is this even possible? I'm not sure how to query the data before the command renders the arguments to the user, along with a default.
I have a Pulumi (python) script that needs to query a database to get a list of customers. The rest of the setup that it does is based on that list.
I've tried to store the username/password for that list in a pulumi secret with pulumi config set --secret db_user $USER and pulumi config set --secret db_password $PASSWORD so that they are encrypted in the pulumi stack file. The problem is that when I try to retrieve them, they are Output objects. I think that pulumi does this so that it can track the value and the resource that created it together, but I just need the string values so I can connect to a database and run a query, as in this simplified example:
db_host = pulumi_config.require("db_host")
db_name = pulumi_config.require("db_name")
db_user = pulumi_config.require_secret("db_user")
db_password = pulumi_config.require_secret("db_password")
# psycopg2.connect fails with an error:
# TypeError: <pulumi.output.Output object at 0x10feb3df0> has type Output, but expected one of: bytes, unicode
connection = psycopg2.connect(
    host=db_host,
    database=db_name,
    user=db_user,
    password=db_password)
cursor = connection.cursor()
query = """
    SELECT id
    FROM customers
    WHERE ready = true
    ORDER BY id DESC
"""
cursor.execute(query)
customer_ids = []
for record in cursor:
    customer_ids.append(record[0])
The code above fails when I try to connect with psycopg2 because it requires a string.
I know that when I use Pulumi libraries that take Pulumi Inputs/Outputs as parameters, the secrets are decrypted just fine. So how can I decrypt these secrets for use with non-Pulumi code?
I think that pulumi does this so that it can track the value and the resource that created it together
The actual reason is that Pulumi needs to resolve the value it retrieves from config, and it's an eventual operation. Pulumi decrypts the value using the key first, and once that's done it can resolve it.
You're dealing with an Output and like any other Output, you need to resolve the value using an apply if you want to interpolate it into a string.
connection = Output.all(db_user, db_password) \
    .apply(lambda args: psycopg2.connect(
        host=db_host,
        database=db_name,
        user=args[0],
        password=args[1]))
# perform your SQL query here
Note: all of the logic you're talking about needs to happen inside the apply.
As a reference for anyone else who tries to do something like this, the complete solution looked like this:
# Takes a connection object, uses it to perform a query, and then returns a list of customer IDs
def do_query(connection):
    query = """
        SELECT id
        FROM customers
        WHERE ready = true
        ORDER BY id DESC
    """
    cursor = connection.cursor()
    cursor.execute(query)
    customer_ids = []
    for record in cursor:
        customer_ids.append(record[0])
    return customer_ids
# gets a list of customer IDs, wrapped in an Output object.
def get_customer_ids():
    customer_ids = Output.all(db_user, db_password) \
        .apply(lambda args: do_query(
            psycopg2.connect(
                host=db_host,
                database=db_name,
                user=args[0],
                password=args[1])))
    return customer_ids
NOTE: The list of customer IDs will still be wrapped in an Output object, so when you want to use it, you will need to do something like this:
def create_connector_for_customers(customer_ids):
    for customer in customer_ids:
        connector_config = ConnectorConfigArgs(
            # Use customer_id to set up connector
        )
        destination_schema = ConnectorDestinationSchemaArgs(
            # Use customer_id to set up connector
        )
# The customer ID list is wrapped in an Output, so it can only be accessed within an `apply`
customer_list = get_customer_ids()
customer_list.apply(lambda customer_ids: create_connector_for_customers(customer_ids))
I'm streaming Twitter data from the API into a Postgres database by modeling this script. Using those exact methods, I'm able to stream the data successfully into the two tables (one containing user_id/user_name, and the other containing data). I've been able to make minor changes to extract a few other bits of information, but using these methods I'm only collecting retweets given a keyword list, and I would like to collect all tweets given the list. Based on the way the original script collects and stores retweet user_ids and user_names, I changed the code and tried to stream into a new table without making any references to retweets. Unfortunately, the result was two empty tables. The code otherwise ran fine and was printing statements to the terminal; there was just no data. Why would this be? Below is my code:
import psycopg2
import tweepy
import json
import numpy as np
# Importing postgres credentials
import postgres_credentials
# Importing twitter credentials
import twitter_credentials
# Accessing twitter from the App created in my account
def autorize_twitter_api():
    """
    This function gets the consumer key, consumer secret key, access token
    and access token secret given by the app created in your Twitter account
    and authenticates them with Tweepy.
    """
    # Get access and consumer keys and tokens
    auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)
    auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)
    return auth
def create_tweets_table(term_to_search):
    """
    This function opens a connection with an already created database and creates a new table to
    store tweets related to a subject specified by the user
    """
    # Connect to Twitter Database created in Postgres
    conn_twitter = psycopg2.connect(dbname=postgres_credentials.dbname, user=postgres_credentials.user,
                                    password=postgres_credentials.password, host=postgres_credentials.host,
                                    port=postgres_credentials.port)
    # Create a cursor to perform database operations
    cursor_twitter = conn_twitter.cursor()
    # With the cursor, create two tables: twitter users and the corresponding table for the selected topic
    cursor_twitter.execute("CREATE TABLE IF NOT EXISTS test_twitter_users (user_id VARCHAR PRIMARY KEY, user_name VARCHAR);")
    query_create = "CREATE TABLE IF NOT EXISTS %s (id SERIAL, created_at_utc timestamp, tweet text NOT NULL, user_id VARCHAR, user_name VARCHAR, PRIMARY KEY(id), FOREIGN KEY(user_id) REFERENCES twitter_users(user_id));" % (
        "test_tweet_text")
    cursor_twitter.execute(query_create)
    # Commit changes
    conn_twitter.commit()
    # Close cursor and the connection
    cursor_twitter.close()
    conn_twitter.close()
    return
def store_tweets_in_table(term_to_search, created_at_utc, tweet, user_id, user_name):
    """
    This function opens a connection with an already created database and inserts into the corresponding table
    tweets related to the selected topic
    """
    # Connect to Twitter Database created in Postgres
    conn_twitter = psycopg2.connect(dbname=postgres_credentials.dbname, user=postgres_credentials.user,
                                    password=postgres_credentials.password, host=postgres_credentials.host,
                                    port=postgres_credentials.port)
    # Create a cursor to perform database operations
    cursor_twitter = conn_twitter.cursor()
    # With the cursor, insert the tweet into the table
    cursor_twitter.execute(
        "INSERT INTO test_twitter_users (user_id, user_name) VALUES (%s, %s) ON CONFLICT(user_id) DO NOTHING;",
        (user_id, user_name))
    cursor_twitter.execute(
        "INSERT INTO %s (created_at_utc, tweet, user_id, user_name) VALUES (%%s, %%s, %%s, %%s);" % (
            'test_tweet_text'),
        (created_at_utc, tweet, user_id, user_name))
    # Commit changes
    conn_twitter.commit()
    # Close cursor and the connection
    cursor_twitter.close()
    conn_twitter.close()
    return
class MyStreamListener(tweepy.StreamListener):
    '''
    def on_status(self, status):
        print(status.text)
    '''
    def on_data(self, raw_data):
        try:
            global term_to_search
            data = json.loads(raw_data)
            # Obtain all the variables to store in each column
            user_id = data['user']['id']
            user_name = data['user']['name']
            created_at_utc = data['created_at']
            tweet = data['text']
            # Store them in the corresponding table in the database
            store_tweets_in_table(term_to_search, created_at_utc, tweet, user_id, user_name)
        except Exception as e:
            print(e)
    def on_error(self, status_code):
        if status_code == 420:
            # returning False in on_error disconnects the stream
            return False
########################################################################
while True:
    if __name__ == "__main__":
        # Creates the table for storing the tweets
        term_to_search = ["donald trump", "trump"]
        create_tweets_table(term_to_search)
        # Connect to the streaming twitter API
        api = tweepy.API(wait_on_rate_limit_notify=True)
        # Stream the tweets
        try:
            streamer = tweepy.Stream(auth=autorize_twitter_api(), listener=MyStreamListener(api=api), tweet_mode='extended')
            streamer.filter(track=term_to_search)
        except:
            continue
What happens if you print the values in this function? Do you have values there?
def on_data(self, raw_data):
    try:
        global term_to_search
        data = json.loads(raw_data)
        # Obtain all the variables to store in each column
        user_id = data['user']['id']
        user_name = data['user']['name']
        created_at_utc = data['created_at']
        tweet = data['text']
        # Store them in the corresponding table in the database
        store_tweets_in_table(term_to_search, created_at_utc, tweet, user_id, user_name)
    except Exception as e:
        print(e)
When you print the SQL statements, can you see the inserts without data?
I discovered the issue: I was creating two new tables, but inserting data into two different tables.
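For reference, one plausible reading of that mismatch in the code above is the foreign key: test_tweet_text references twitter_users while the inserts populate test_twitter_users, so each insert would fail the constraint and the exception would be swallowed by on_data. A sketch of a consistent CREATE statement under that assumption:
query_create = """CREATE TABLE IF NOT EXISTS test_tweet_text (
    id SERIAL,
    created_at_utc timestamp,
    tweet text NOT NULL,
    user_id VARCHAR,
    user_name VARCHAR,
    PRIMARY KEY(id),
    FOREIGN KEY(user_id) REFERENCES test_twitter_users(user_id));"""  # was: twitter_users
cursor_twitter.execute(query_create)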
I'm trying to use the Bottle framework in Python with sqlite3. I made a Todo List application, but the first time I tried to post data an error occurred, and the second time I got 'database is locked'.
Can anyone help?
# -*- coding: utf-8 -*-
import os, sqlite3
from bottle import route, run, get, post, request, template, redirect
# sqlite from here----------------
dbname = "todo.db"
connection = sqlite3.connect(dbname)
dbcontrol = connection.cursor()
# Making table from here--------------------
create_table = '''create table if not exists todo_list (todo text)'''
dbcontrol.execute(create_table)  # the statement must be executed, not just defined
@route("/")
def index():
    todo_list = get_todo()
    return template("index", todo_list=todo_list)
I think I need more specific code here.
#route("/enter", method=["POST"])
def enter():
conn = sqlite3.connect("todo.db")
todo=request.POST.getunicode("todo_list")
save_todo(todo)
return redirect("/")
def save_todo(todo):
connection = sqlite3.connect('todo.db')
dbcontrol = connection.cursor()
insert="insert into todo_list(todo) values('{0}')".format(todo)
dbcontrol.execute(insert)
connection.commit()
def get_todo():
    connection = sqlite3.connect('todo.db')
    dbcontrol = connection.cursor()
    select = "select * from todo_list"
    dbcontrol.execute(select)
    row = dbcontrol.fetchall()
    return row
run(host="localhost", port=8080, debug=True)
Install the bottle-sqlite with:
$ pip install bottle-sqlite
An example from the plugin:
import bottle
from bottle import template, HTTPError
from bottle.ext import sqlite
app = bottle.Bottle()
plugin = sqlite.Plugin(dbfile='/tmp/test.db')
app.install(plugin)
@app.route('/show/:item')
def show(item, db):
    row = db.execute('SELECT * from items where name=?', (item,)).fetchone()
    if row:
        return template('showitem', page=row)
    return HTTPError(404, "Page not found")
Important notes from the plugin
Routes that do not expect a db keyword argument are not affected.
The connection handle is configured so that sqlite3.Row objects can be accessed both by index (like tuples) and case-insensitively by name.
At the end of the request cycle, outstanding transactions are committed and the connection is closed automatically. If an error occurs, any changes to the database since the last commit are rolled back to keep the database in a consistent state.
Also take a look at Configuration section.
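Applied to the todo app from the question, a minimal sketch of what the POST route could look like with the plugin managing the connection (route and table names follow the question; installing on the default app is my assumption):
import bottle
from bottle import route, request, redirect
from bottle.ext import sqlite

# install the plugin on the default app; routes that accept a `db`
# argument get a managed connection per request
bottle.install(sqlite.Plugin(dbfile='todo.db'))

@route("/enter", method=["POST"])
def enter(db):
    todo = request.POST.getunicode("todo_list")
    db.execute("insert into todo_list(todo) values(?)", (todo,))
    # the plugin commits and closes the connection when the request ends
    return redirect("/")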