Bad credentials from Astra DB connect - python

Currently I am learning about Astra DB from this YouTube tutorial:
https://www.youtube.com/watch?v=NyDT3KkscSk&t=2439s
I managed to download the connection.zip file from Astra DB and generated admin token keys. But when I try to make a connection, such as:
from app.crud import create_entry
I get this error:
raise NoHostAvailable("Unable to connect to any servers", errors)
cassandra.cluster.NoHostAvailable: ('Unable to connect to any servers', {'98db9cb2-907a-4f9d-a935-69b69fb2157f-asia-south1.db.astra.datastax.com:29042:611a8370-f129-4099-84e2-c3b2f426ebdc': AuthenticationFailed('Failed to authenticate to 98db9cb2-907a-4f9d-a935-69b69fb2157f-asia-south1.db.astra.datastax.com:29042:611a8370-f129-4099-84e2-c3b2f426ebdc: Error from server: code=0100 [Bad credentials] message="We recently improved your database security. To find out more and reconnect, see https://docs.datastax.com/en/astra/docs/manage-application-tokens.html"'), '98db9cb2-907a-4f9d-a935-69b69fb2157f-asia-south1.db.astra.datastax.com:29042:040ab116-8c77-4eb4-a357-c9bdcbb637d4': AuthenticationFailed('Failed to authenticate to 98db9cb2-907a-4f9d-a935-69b69fb2157f-asia-south1.db.astra.datastax.com:29042:040ab116-8c77-4eb4-a357-c9bdcbb637d4: Error from server: code=0100 [Bad credentials] message="We recently improved your database security. To find out more and reconnect, see https://docs.datastax.com/en/astra/docs/manage-application-tokens.html"'), '98db9cb2-907a-4f9d-a935-69b69fb2157f-asia-south1.db.astra.datastax.com:29042:536e6e99-ef4e-47d0-9308-b0c6cdf4aa37': AuthenticationFailed('Failed to authenticate to 98db9cb2-907a-4f9d-a935-69b69fb2157f-asia-south1.db.astra.datastax.com:29042:536e6e99-ef4e-47d0-9308-b0c6cdf4aa37: Error from server: code=0100 [Bad credentials] message="We recently improved your database security. To find out more and reconnect, see https://docs.datastax.com/en/astra/docs/manage-application-tokens.html"')})
Here is my db.py:
import os
import pathlib
from dotenv import load_dotenv
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
from cassandra.cqlengine.connection import register_connection, set_default_connection

BASE_DIR = pathlib.Path(__file__).parent
CLUSTER_BUNDLE = BASE_DIR / 'ignored' / 'connect.zip'

load_dotenv()

astra_db_client_id = os.environ.get('ASTRA_DB_CLIENT_ID')
astra_db_client_secret = os.environ.get('ASTRA_DB_CLIENT_SECRET')


def get_cluster():
    cloud_config = {
        'secure_connect_bundle': CLUSTER_BUNDLE
    }
    auth_provider = PlainTextAuthProvider(astra_db_client_id, astra_db_client_secret)
    cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider,
                      control_connection_timeout=30, connect_timeout=30)
    return cluster


def get_session():
    cluster = get_cluster()
    session = cluster.connect()
    register_connection(str(session), session=session)
    set_default_connection(str(session))
    return session

# session = get_session()
# row = session.execute("select release_version from system.local").one()
# if row:
#     print(row[0])
# else:
#     print("An error occurred.")
I tried recreating the token keys many times and re-downloaded the drivers as well, but I still have no luck getting past the bad credentials error.
My crud.py:
from .db import get_session
from .models import Product
from cassandra.cqlengine.management import sync_table

session = get_session()
sync_table(Product)


def create_entry(data: dict):
    return Product.create(**data)
My models.py:
from cassandra.cqlengine import columns
from cassandra.cqlengine.models import Model


class Product(Model):  # -> table
    __keyspace__ = "testing"
    asin = columns.Text(primary_key=True, required=True)
    title = columns.Text()

You might want to take a look at this:
https://docs.datastax.com/en/astra/docs/docs/using-the-datastax-python-driver-to-connect-to-your-database.html
Specifically, the section here:
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider

cloud_config = {
    'secure_connect_bundle': '/path/to/secure-connect-database_name.zip'
}
auth_provider = PlainTextAuthProvider('username', 'password')
cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider)
session = cluster.connect()
When creating the connection you’ll want to pass the secure connect bundle zip. You then provide the clientId and clientSecret from the connections file you downloaded as the username and password.
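Since the values in the question are read from a .env file, it is also worth confirming that they are actually being loaded; a bad credentials error looks the same whether the secret is wrong or simply None. A minimal sanity-check sketch (reusing the question's variable names, purely illustrative) would be:

import os
from dotenv import load_dotenv

load_dotenv()  # assumes the .env file is in the current working directory / project root

astra_db_client_id = os.environ.get('ASTRA_DB_CLIENT_ID')
astra_db_client_secret = os.environ.get('ASTRA_DB_CLIENT_SECRET')

# Fail fast if either value is missing, instead of letting the driver report bad credentials.
if not astra_db_client_id or not astra_db_client_secret:
    raise RuntimeError("ASTRA_DB_CLIENT_ID / ASTRA_DB_CLIENT_SECRET not set; check the .env file")

# Print masked values so you can compare them against the generated token's
# "Client Id" and "Client Secret" columns (not the long "Token" value).
print("client id starts with:", astra_db_client_id[:4])
print("client secret starts with:", astra_db_client_secret[:4])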

Related

TWILIO API ERROR Credentials are required to create a TwilioClient django

I am trying to include the Twilio API in my project. It should send SMS. I have finished the tutorial, but then I get the error Credentials are required to create a TwilioClient. I have the credentials in a .env file; I import them into settings and then read them from settings in my views.
This is where I get the error.
.env
TWILIO_ACCOUNT_SID= 'xxxxxxxxxxxxxxxxxxxxxx'
TWILIO_AUTH_TOKEN= 'xxxxxxxxxxxxxxxxxxxxxxx'
TWILIO_NUMBER= 'xxxxxxxxxxxxxxxxxx'
settings.py
import os
TWILIO_ACCOUNT_SID = os.getenv('TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = os.getenv('TWILIO_AUTH_TOKEN')
TWILIO_NUMBER = os.getenv('TWILIO_NUMBER')
SMS_BROADCAST_TO_NUMBERS = [
    '+111111111',
]
views
from django.conf import settings
from django.http import HttpResponse
from twilio.rest import Client


def broadcast_sms(request):
    message_to_broadcast = ("Have you played the incredible TwilioQuest "
                            "yet? Grab it here: https://www.twilio.com/quest")
    client = Client(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN)
    for recipient in settings.SMS_BROADCAST_TO_NUMBERS:
        if recipient:
            client.messages.create(to=recipient,
                                   from_=settings.TWILIO_NUMBER,
                                   body=message_to_broadcast)
    return HttpResponse("messages sent!", 200)
And here is the version of the code that works, but I want to import these values from settings:
# def sms(request):
#     TWILIO_ACCOUNT_SID = "xxxxxxxxxxxxxxxxxxxxxxx"
#     TWILIO_AUTH_TOKEN = "xxxxxxxxxxxxxxxxx"
#     TWILIO_NUMBER = "xxxxxxxxxxxxx"
#     message_to_broadcast = ("Have you played the incredible TwilioQuest "
#                             "yet? Grab it here: https://www.twilio.com/quest")
#
#     client = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
#     for recipient in settings.SMS_BROADCAST_TO_NUMBERS:
#         if recipient:
#             client.messages.create(to=+xxxxxxxxx,
#                                    from_=+xxxxxxxxxx,
#                                    body=message_to_broadcast)
#     return HttpResponse("messages sent!", 200)
Any idea how to solve this?
So you are using a .env file rather than setting your OS's environment variables? If so, there is an article below, pointing to https://github.com/theskumar/python-dotenv:
How To Set Environment Variables
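For example, a minimal sketch of that approach (assuming python-dotenv is installed and the .env file sits in the project root next to manage.py) is to load the file at the top of settings.py before the os.getenv calls:

# settings.py (sketch): load .env into the process environment first,
# otherwise os.getenv() returns None and Twilio reports missing credentials.
import os
from pathlib import Path
from dotenv import load_dotenv

BASE_DIR = Path(__file__).resolve().parent.parent
load_dotenv(BASE_DIR / '.env')  # assumes .env lives in the project root

TWILIO_ACCOUNT_SID = os.getenv('TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = os.getenv('TWILIO_AUTH_TOKEN')
TWILIO_NUMBER = os.getenv('TWILIO_NUMBER')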

"Oracle "alter session set" via flask-sqlalchemy and cx-oracle"

How can I start a connection to an Oracle database using flask-sqlalchemy and cx-oracle while sending these parameters:
alter session set nls_comp=linguistic;
alter session set nls_sort=Latin_AI;
The goal is to be able to run queries and sorts that do not distinguish, for example, between "Jose" and "José".
This is how I usually make the connection:
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import config


def create_app(config_name=(os.getenv('FLASK_CONFIG') or 'default')):
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    db.init_app(app)

    from commons import commons_bp
    from admin import admin_bp
    from inventario import inventario_bp
    from incidencias import incidencias_bp
    from inconformidades import inconformidades_bp

    with app.app_context():
        app.register_blueprint(commons_bp)
        app.register_blueprint(admin_bp)
        app.register_blueprint(inventario_bp)
        app.register_blueprint(incidencias_bp)
        app.register_blueprint(inconformidades_bp)

    return app
and the configuration:
class DevelopmentConfig(Config):
    TESTING = True
    DEBUG = True
    SQLALCHEMY_ECHO = os.environ.get('SQLALCHEMY_ECHO') or False
    SQLALCHEMY_POOL_SIZE = None
    SQLALCHEMY_POOL_TIMEOUT = None
    SQLALCHEMY_BINDS = {
        'adminapps': 'oracle://ADMINAPPS:ADMINAPPS#SERVIDOR53/ENTECO',
        'incidencias': 'oracle://INCIDENCIAS:INCIDENCIAS#SERVIDOR53/ENTECO',
        'incidencias': 'oracle://INCIDENCIAS:INCIDENCIAS#SERVIDOR53/ENTECO',
        'inventario': 'oracle://INVENTARIO:INVENTARIO#SERVIDOR53/ENTECO',
        'inconformidades': 'oracle://INCONFORMIDADES:INCONFORMIDADES#SERVIDOR53/ENTECO',
        'documentacion': 'oracle://DOCUMENTACION:DOCUMENTACION#SERVIDOR53/ENTECO',
        'auditoria': 'oracle://AUDITORIA:AUDITORIA#SERVIDOR53/ENTECO',
        'navision': 'mssql+pyodbc://DB:DB.2020#SERVIDOR52',
    }
I've tried, among other things:
'adminapps': ['oracle://ADMINAPPS:ADMINAPPS#SERVIDOR53/ENTECO', 'alter session set "nls_comp"="linguistic"', 'alter session set "nls_sort"="Latin_AI"'],
but I haven't been successful; I can't figure out how to do it. Thank you for your answers!
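One approach worth sketching here (my suggestion; SQLALCHEMY_BINDS itself only accepts the connection URL string) is SQLAlchemy's "connect" event, which runs arbitrary statements on every new DBAPI connection. The sketch below assumes Flask-SQLAlchemy 2.x (db.get_engine with a bind name) and the 'adminapps' bind from the configuration above; the from app import line is a hypothetical import matching the question's layout.

# Sketch: run the ALTER SESSION statements on every new DBAPI connection
# for the 'adminapps' bind, using SQLAlchemy's "connect" event.
from sqlalchemy import event
from app import create_app, db  # hypothetical imports matching the question's layout

app = create_app()

with app.app_context():
    engine = db.get_engine(app, bind='adminapps')  # engine behind the 'adminapps' bind

    @event.listens_for(engine, "connect")
    def set_nls_session_options(dbapi_connection, connection_record):
        cursor = dbapi_connection.cursor()
        cursor.execute("ALTER SESSION SET NLS_COMP = LINGUISTIC")
        cursor.execute("ALTER SESSION SET NLS_SORT = LATIN_AI")
        cursor.close()

The same listener can be registered for each bind that needs the linguistic comparison settings.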

How to list/create a domain using designateclient?

I've been trying to use the designate client to create DNS entries, but even before creating them, I tried listing all the existing entries.
I started with listing zones first, and it doesn't seem to work.
#!/usr/bin/env python3
import os
import json

from designateclient.v2 import client
from keystoneauth1.identity import generic
from keystoneauth1 import session as keystone_session

auth = generic.Password(
    auth_url='url',
    username='username',
    password=os.environ['password'],
    project_name='domain name',
    project_domain_id='default',
    user_domain_id='default')

session = keystone_session.Session(auth=auth)
client = client.Client(session=session)

print(client.zones.list())
Any help how I could get this data? Thank you in advance :)
So I got over the authentication issue by using the following code:
import os

from keystoneclient.auth.identity import v3
from keystoneclient import session
from keystoneclient.v3 import client
from designateclient.v2 import client as d_client

v3_auth = v3.Password(auth_url='auth_url',
                      username='username',
                      password=os.environ['pass'],
                      project_name='project_name',
                      project_domain_name="project_domain_name",
                      user_domain_name="user_domain_name")

v3_ses = session.Session(auth=v3_auth)
auth_token = v3_ses.get_token()

session = session.Session(auth=v3_auth, timeout=10)
desig_client = d_client.Client(session=session)
print(desig_client)

list_of_zones = desig_client.zones.list()
The next question is: how do I create a new domain?
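A minimal sketch for that follow-up, based on the designateclient v2 zones API (the zone name and email below are placeholders, so double-check against the client's documentation):

# Sketch: create a zone ("domain" in Designate v2 terms) with the client built above.
zone = desig_client.zones.create('example.com.', email='admin@example.com')
print(zone['id'], zone['name'])

# Optionally add a record set to the new zone (values are placeholders).
desig_client.recordsets.create(zone['id'], 'www.example.com.', 'A', ['203.0.113.10'])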

Flask API facing InterfaceError with PostgreSQL

I have a Flask API based on the Flask-RESTPlus extension, hosted on Google App Engine. The API does a basic job of fetching data from a Google Cloud SQL PostgreSQL instance. The API works fine otherwise, but sometimes it starts returning InterfaceError: cursor already closed.
Strangely, when I do a gcloud app deploy, the API starts working fine again.
Here's the basic format of the API:
import simplejson as json
import psycopg2
from flask import Flask, jsonify
from flask_restplus import Api, Resource, fields
from psycopg2.extras import RealDictCursor

app = Flask(__name__)
app.config['SWAGGER_UI_JSONEDITOR'] = True

api = Api(app=app,
          doc='/docs',
          version="1.0",
          title="Title",
          description="description")
app.config['SWAGGER_UI_JSONEDITOR'] = True

ns_pricing = api.namespace('cropPricing')

db_user = "xxxx"
db_pass = "xxxx"
db_name = "xxxxx"
cloud_sql_connection_name = "xxxxxx"

conn = psycopg2.connect(user=db_user,
                        password=db_pass,
                        host='xxxxx',
                        dbname=db_name)


@ns_pricing.route('/list')
class States(Resource):
    def get(self):
        """
        List all the states for which data is available.
        """
        cur = conn.cursor(cursor_factory=RealDictCursor)
        query = """
            SELECT DISTINCT state
            FROM db.table
        """
        conn.commit()
        cur.execute(query)
        states = json.loads(json.dumps(cur.fetchall()))
        if len(states) == 0:
            return jsonify(data=[],
                           status="Error",
                           message="Requested data not found")
        else:
            return jsonify(status="Success",
                           message="Successfully retrieved states",
                           data=states)
What should I fix to not see the error anymore?
It would be good to use an ORM such as SQLAlchemy / Flask-SQLAlchemy, which would handle establishing and re-establishing the connection for you.
Though, if you stick with plain psycopg2, you can use try/except to catch the exception and re-establish the connection:
try:
    cur.execute(query)
except psycopg2.InterfaceError as err:
    print(err)
    conn = psycopg2.connect(....)
    cur = conn.cursor()
    cur.execute(query)
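A slightly more robust sketch of the same idea (my wording, not part of the original answer) wraps the reconnect logic in a helper that checks conn.closed and retries once on InterfaceError, reusing the placeholder connection parameters from the question:

# Sketch: reconnect helper around the question's module-level `conn`.
import psycopg2
from psycopg2.extras import RealDictCursor

db_user = "xxxx"
db_pass = "xxxx"
db_name = "xxxxx"

conn = None


def get_connection():
    """Return a live connection, reconnecting if the previous one was closed."""
    global conn
    if conn is None or conn.closed:
        conn = psycopg2.connect(user=db_user, password=db_pass,
                                host='xxxxx', dbname=db_name)
    return conn


def run_query(query):
    """Execute a read-only query, retrying once if the connection was dropped."""
    global conn
    try:
        cur = get_connection().cursor(cursor_factory=RealDictCursor)
        cur.execute(query)
        return cur.fetchall()
    except psycopg2.InterfaceError:
        conn = None  # force get_connection() to reconnect
        cur = get_connection().cursor(cursor_factory=RealDictCursor)
        cur.execute(query)
        return cur.fetchall()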

Creating a reusable datastore client for large flask application

I want to create a database connection from my Python Flask application to my Datastore instance on GCP. I have a file services/db.py:
from google.cloud import datastore
from google.auth import compute_engine
from config import env
import os
import logging
import warnings

namespace = env.str('ENV')


class Datastore_Client():
    def context_local(self):
        datastore_client = datastore.Client(namespace=namespace)
        return datastore_client

    def context_test(self):
        project = env.str("GOOGLE_CLOUD_PROJECT")
        credentials = compute_engine.Credentials()
        datastore_client = datastore.Client(credentials=credentials, project=project, namespace='test')
        return datastore_client

    def context_live(self):
        datastore_client = datastore.Client(namespace=namespace)
        return datastore_client

    def get_datastore_client(self):
        contexts = {
            'local': self.context_local,
            'test': self.context_test,
            'appengine': self.context_live
        }
        context_func = contexts.get(env.str("CONTEXT"), self.context_local)
        return context_func()

    builtin_list = list

    def from_datastore(self, entity):
        if not entity:
            return None
        if isinstance(entity, list):
            entity = entity.pop()
        return entity
In my model files I would reference the datastore client like so:
from ..services.db import Datastore_Client
client = Datastore_Client().get_datastore_client()
but providing this reference in every file that needs it seems to spin up a database connection for each instance, whereas I want an application-wide connection.
I have looked at the application context, but the examples use SQLite and talk about tearing down the connection afterwards, so I am not sure whether the same approach can be used for Datastore.
You are creating a new object every time you call Datastore_Client(). It sounds like you want to create a singleton Datastore_Client and use that in your model files.
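A minimal sketch of that idea (my example, building on the question's services/db.py; the namespace/credentials selection is omitted here for brevity) is to build the client lazily once at module level and have every model import the shared accessor:

# services/db.py (sketch): one shared Datastore client per process.
from google.cloud import datastore

_client = None


def get_datastore_client():
    """Return a single shared Datastore client, creating it on first use."""
    global _client
    if _client is None:
        _client = datastore.Client()
    return _client

Model files then call get_datastore_client() instead of instantiating Datastore_Client() themselves; since Python caches the services.db module after the first import, the underlying client is constructed only once per process.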
