I have written a script that should purchase an asset from the catalog.
import re
from requests import post, get
cookie = "blablabla"
ID = 1562150
# getting x-csrf-token
token = post("https://auth.roblox.com/v2/logout", cookies={".ROBLOSECURITY": cookie}).headers['X-CSRF-TOKEN']
print(token)
# getting item details
detail_res = get(f"https://www.roblox.com/library/{ID}")
text = detail_res.text
productId = int(get(f"https://api.roblox.com/marketplace/productinfo?assetId={ID}").json()["ProductId"])
expectedPrice = int(re.search(r'data-expected-price="(\d+)"', text).group(1))
expectedSellerId = int(re.search(r'data-expected-seller-id="(\d+)"', text).group(1))
headers = {
    "x-csrf-token": token,
    "content-type": "application/json; charset=UTF-8"
}
data = {
    "expectedCurrency": 1,
    "expectedPrice": expectedPrice,
    "expectedSellerId": expectedSellerId
}
buyres = post(f"https://economy.roblox.com/v1/purchases/products/{productId}", headers=headers,
              data=data,
              cookies={".ROBLOSECURITY": cookie})
if buyres.status_code == 200:
    print("Successfully bought item")
The problem is that it doesn't purchase the item; instead it fails with error 500 (InternalServerError).
Someone told me that if I add json.dumps() to the script it might work.
How do I add json.dumps() here (I don't understand it, even though I read the docs), and how do I fix this so the script purchases the item?
Big thanks to anyone who can help me.
Import the json package.
json.dumps() converts a Python dictionary to a JSON string.
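For example, dumping a small dictionary gives back a plain string:

import json

data = {"expectedCurrency": 1, "expectedPrice": 100}
print(json.dumps(data))        # {"expectedCurrency": 1, "expectedPrice": 100}
print(type(json.dumps(data)))  # <class 'str'>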
I'm guessing this is what you want.
buyres = post(f"https://economy.roblox.com/v1/purchases/products/{productId}",
              headers=headers,
              data=json.dumps(data),
              cookies={".ROBLOSECURITY": cookie})
I found the answer finally, I had to do it like this:
import json

dataLoad = json.dumps(data)
buyres = post(f"https://economy.roblox.com/v1/purchases/products/{productId}", headers=headers,
              data=dataLoad,
              cookies={".ROBLOSECURITY": cookie})
When I search for books with a single-word name (e.g. bluets) my code works fine, but when I search for books whose names have two words or spaces (e.g. white whale) I get an error (jinja2 syntax). How do I solve this error?
#app.route("/book", methods["GET", "POST"])
def get_books():
api_key =
os.environ.get("API_KEY")
if request.method == "POST":
book = request.form.get("book")
url =f"https://www.googleapis.com/books/v1/volumes?q={book}:keyes&key={api_key}"
response =urllib.request.urlopen(url)
data = response.read()
jsondata = json.loads(data)
return render_template ("book.html", books=jsondata["items"]
I tried to search for similar cases, and just found one solution, but I didn't understand it
Here is my error message
http.client.InvalidURL
http.client.InvalidURL: URL can't contain control characters. '/books/v1/volumes?q=white whale:keyes&key=AIzaSyDtjvhKOniHFwkIcz7-720bgtnubagFxS8' (found at least ' ')
Some characters in a URL need to be encoded; in your situation you have to use + or %20 instead of the space.
This URL has %20 instead of the space and it works for me. It also works if I use + instead.
import urllib.request
import json
url = 'https://www.googleapis.com/books/v1/volumes?q=white%20whale:keyes&key=AIzaSyDtjvhKOniHFwkIcz7-720bgtnubagFxS8'
#url = 'https://www.googleapis.com/books/v1/volumes?q=white+whale:keyes&key=AIzaSyDtjvhKOniHFwkIcz7-720bgtnubagFxS8'
response = urllib.request.urlopen(url)
text = response.read()
data = json.loads(text)
print(data)
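If you would rather encode the value programmatically than edit the URL by hand, urllib.parse.quote can do it. A small sketch (safe=':' just keeps the colon unencoded, as in the working URL above):

import urllib.parse

query = urllib.parse.quote('white whale:keyes', safe=':')  # 'white%20whale:keyes'
url = 'https://www.googleapis.com/books/v1/volumes?q=' + query + '&key=AIzaSyDtjvhKOniHFwkIcz7-720bgtnubagFxS8'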
With requests you don't even have to do it manually, because it encodes the URL automatically:
import requests
url = 'https://www.googleapis.com/books/v1/volumes?q=white whale:keyes&key=AIzaSyDtjvhKOniHFwkIcz7-720bgtnubagFxS8'
r = requests.get(url)
data = r.json()
print(data)
You may use urllib.parse.urlencode() to make sure all characters are correctly encoded.
import urllib.request
import urllib.parse
import json

payload = {
    'q': 'white whale:keyes',
    'key': 'AIzaSyDtjvhKOniHFwkIcz7-720bgtnubagFxS8',
}
query = urllib.parse.urlencode(payload)
url = 'https://www.googleapis.com/books/v1/volumes?' + query
response = urllib.request.urlopen(url)
text = response.read()
data = json.loads(text)
print(data)
And the same with requests; again it doesn't need manual encoding:
import requests
payload = {
    'q': 'white whale:keyes',
    'key': 'AIzaSyDtjvhKOniHFwkIcz7-720bgtnubagFxS8',
}
url = 'https://www.googleapis.com/books/v1/volumes'
r = requests.get(url, params=payload)
data = r.json()
print(data)
After a POST request to a certain link I get the following response:
{"data":{"loginWithEmail":{"__typename":"LoginResponse","me":{"__typename":"User","username":"davishelenekb","displayname":"davishelenekb","avatar":"https://image.sitecdn.com/avatar/default11.png","partnerStatus":"NONE","role":"None","myChatBadges":[],"private":{"__typename":"UserPrivateInfo","accessToken":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtasdasdasdaslzaGVsZW5la2IiLCJkaXNwbGF5bmFtZSI6ImRhdmlzaGVsZW5la2IiLCJhdmF0YXIiOiJodHRwczovL2ltYWdlLmRsaXZlY2RuLmNvbS9hdmF0YXIvZGVmYXVsdDExLnBuZyIsInBhcnRuZXJfc3RhdHVzX3N0cmluZyI6Ik5PTkUiLCJpZCI6IiIsImxpZCI6MCwidHlwZSI6ImVtYWlsIiwicm9sZSI6Ik5vbmUiLCJvYXV0aF9hcHBpZCI6IiIsImV4cCI6MTYwOTE4NDQwNyadasdasdaNTkyNDA3LCJpc3MiOiJETGl2ZSJ9.cQXJFUEo7r4bQa2FPHvKAvjisEF1VKldhFdxOcZ3YTk","email":"email","emailVerified":true,"bttAddress":{"__typename":"MyBTTAddress","senderAddress":null}},"subCashbacked":true},"accessToken":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImRhdmlzaGVsZW5la2IiLCJkaXNwbGF5bmFtZSI6ImRhdmlzaGVsZW5la2IiLCJhdmF0YXIiOiJodHRwczovL2ltYWdlLmRsaXZlY2RuLmNvbS9hdmF0YXIvZGVmYasdasdyIsInBhcnRuZXJfc3RhdHVzX3N0cmluZyI6Ik5PTkUiLCJpasdasdlwZSI6ImVtYWlsIiwicm9sZSI6Ik5vbmUiLCJvYXV0aF9hcHBpZCI6IiIsImV4cCI6MTYwOTE4NDQasd221DA3LCJpc3MiOiJETGl2ZSJ9.cQXJFUEo7r4bQa2FPHvKAvjisEF1VKldhFdxOcZ3YTk","twofactorToken":null,"err":null}}}
I just want to extract the key that is in
"accessToken":"KEY",
How can I do this?
My Code
import requests
import json
from fake_useragent import UserAgent

# Set modules
ua = UserAgent()
url = 'site'

# Read TXT
accounts = 'accounts\\accounts.txt'
with open(accounts) as line:
    login = line.readline()

line = login.split(",")
cnt = 1
email = line[0]
password = line[1]

# login
head = {
    '.......': '.........',
}
data = {
    ..........
}
test = requests.post(url, json.dumps(data), headers=head)
if test.status_code == 200:
    print('Loged!')
    print(test.text)
else:
    print('Error')
You can take the text of the response, parse it as JSON, and then access the "accessToken" property:
test = requests.post(url, json.dumps(data), headers=head)
if test.status_code == 200:
    parsed = json.loads(test.text)
    key = parsed['data']['loginWithEmail']['accessToken']
    print(key)
Side note:
This snippet assumes that the format of the returned JSON is well known and no error occurs. In a real-world scenario, you may want to add a few validations to it.
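For instance, a defensive version of that lookup (purely illustrative, assuming the same response shape) could look like this:

test = requests.post(url, json.dumps(data), headers=head)
if test.status_code == 200:
    try:
        parsed = test.json()  # same as json.loads(test.text)
    except ValueError:
        parsed = None  # body was not valid JSON
    key = (parsed or {}).get('data', {}).get('loginWithEmail', {}).get('accessToken')
    if key:
        print(key)
    else:
        print('No accessToken in the response')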
You can achieve what you need like this:
response = json.loads(test.text)
print(response["data"]["loginWithEmail"]["me"]["private"]["accessToken"])
I am using this API to list users. One of the parameters I can specify is a team id, which is placed in an array. When I try to specify a team id it doesn't work when I put it in the payload, but it works when I change the URL to include the team id.
This is the API reference: https://api-reference.pagerduty.com/#!/Users/get_users
Here is what I am basing my code off of: https://github.com/PagerDuty/API_Python_Examples/blob/master/REST_API_v2/Users/list_users.py
This is my code when I try to specify the team id in the payload. It doesn't work like this for some reason, but it works when I change the URL to url = 'https://api.pagerduty.com/users?team_ids%5B%5D=TEAMID&team_ids%5B%5D=', where TEAMID is an actual team id.
with open('config/config.json') as f:
    config = json.load(f)

API_KEY = config['API_KEY']
TEAM_IDS = ['TEAMID']

def list_users():
    url = 'https://api.pagerduty.com/users'
    headers = {
        'Accept': 'application/vnd.pagerduty+json;version=2',
        'Authorization': 'Token token={token}'.format(token=API_KEY)
    }
    payload = {
        'team_ids[]': TEAM_IDS
    }
    r = requests.get(url, headers=headers)
    result = []
    if r.status_code == 200:
        # loops for each user and retrieves their email
        result = [user['email'] for user in r.json()['users']]
        return result
    else:
        return None
I want to get this to work by listing team ids in the array and sending them in the payload, so that I can list more than one team id without cluttering the URL with all of them.
Looks like you just need something like this
payload = {
    'team_ids[]': TEAM_IDS
}
r = requests.get(url, headers=headers, params=payload)
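This works because requests expands a list value in params into repeated query parameters, which is exactly the team_ids%5B%5D=...&team_ids%5B%5D=... form of the URL you hard-coded. A quick illustration (TEAMID1 and TEAMID2 are placeholder ids):

import requests

req = requests.Request('GET', 'https://api.pagerduty.com/users',
                       params={'team_ids[]': ['TEAMID1', 'TEAMID2']}).prepare()
print(req.url)
# https://api.pagerduty.com/users?team_ids%5B%5D=TEAMID1&team_ids%5B%5D=TEAMID2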
May I know if there is a way to change my code so that it translates the website from Malay to English, but using Bing Translator instead?
import pandas
import urllib.request as ur
from bs4 import BeautifulSoup
from googletrans import Translator

translator = Translator()
url = "https://www.bharian.com.my/"
page = ur.urlopen(url)
df = pandas.DataFrame(columns=["Title", "Date", "Url", "Content"])
soup = BeautifulSoup(page, "html.parser")
headlines = soup.find_all("div", {"class": "ms-vb itx"})
intro = soup.find_all("div", {"class": "ms-rtestate-field"})
dates = soup.find_all("td", {"class": "ms-vb2"})
count = len(headlines)

for i in range(0, len(headlines)):
    s = str(headlines[i].a.string)
    url1 = headlines[i].a.get("href")
    page1 = ur.urlopen(url1)
    soup1 = BeautifulSoup(page1, "html.parser")
    cont = soup1.find_all("div", {"style": "text-align:justify;"})
    content = intro[2 * i].p.text
    for data in cont:
        content += data.text
    content = translator.translate(content, src="ms", dest="en").text
    s = translator.translate(s, src="ms").text
    df = df.append(
        {
            "Title": s,
            "Date": dates[i].string,
            "Url": url1,
            "Content": content,
        },
        ignore_index=True,
    )

df.to_csv("News.csv")
# f.write(str(len(result))+'\n')
# for res in result:
# f.write(str(res.pre.string))
# f.close()
# while(driver.current_url == url):
# continue
Yes, there is, but you might not like it very much. Right now it looks like you're using the googletrans library from PyPI - https://pypi.org/project/googletrans/. A similar-looking package exists for Bing Translate called bing_translator; however, it looks as though this package is out of date. Microsoft themselves, however, have published code samples on GitHub:
import os, requests, uuid, json
key_var_name = 'TRANSLATOR_TEXT_SUBSCRIPTION_KEY'
if not key_var_name in os.environ:
    raise Exception('Please set/export the environment variable: {}'.format(key_var_name))
subscription_key = os.environ[key_var_name]

endpoint_var_name = 'TRANSLATOR_TEXT_ENDPOINT'
if not endpoint_var_name in os.environ:
    raise Exception('Please set/export the environment variable: {}'.format(endpoint_var_name))
endpoint = os.environ[endpoint_var_name]

# If you encounter any issues with the base_url or path, make sure
# that you are using the latest endpoint: https://learn.microsoft.com/azure/cognitive-services/translator/reference/v3-0-translate
path = '/translate?api-version=3.0'
params = '&to=de&to=it'
constructed_url = endpoint + path + params

headers = {
    'Ocp-Apim-Subscription-Key': subscription_key,
    'Content-type': 'application/json',
    'X-ClientTraceId': str(uuid.uuid4())
}

# You can pass more than one object in body.
body = [{
    'text': 'Hello World!'
}]
request = requests.post(constructed_url, headers=headers, json=body)
response = request.json()
print(json.dumps(response, sort_keys=True, indent=4, separators=(',', ': ')))
As you've probably noticed, this is a fair bit clunkier than your nice googletrans package. You might want to make your own abstraction layer to make this easier (and maybe publish it on PyPI!).
TRANSLATOR_TEXT_SUBSCRIPTION_KEY and TRANSLATOR_TEXT_ENDPOINT should be filled in with your Translation service subscription key and endpoint. Whilst Google seems happy enough for you to freely use their API, Microsoft would like you to create an account. Whilst it looks like you can get hold of a free key, depending on what you're using it for Microsoft might expect payment. The links on the GitHub page should take you to the relevant articles for that.
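For example, a thin wrapper around the sample above could replace the googletrans calls in your loop. This is just a sketch: it assumes the same TRANSLATOR_TEXT_SUBSCRIPTION_KEY / TRANSLATOR_TEXT_ENDPOINT environment variables and the v3.0 /translate endpoint shown in Microsoft's sample.

import os
import uuid
import requests

def bing_translate(text, src='ms', dest='en'):
    # Build the v3.0 /translate URL from the configured endpoint.
    subscription_key = os.environ['TRANSLATOR_TEXT_SUBSCRIPTION_KEY']
    endpoint = os.environ['TRANSLATOR_TEXT_ENDPOINT']
    url = endpoint + '/translate?api-version=3.0&from=' + src + '&to=' + dest
    headers = {
        'Ocp-Apim-Subscription-Key': subscription_key,
        'Content-type': 'application/json',
        'X-ClientTraceId': str(uuid.uuid4()),
    }
    body = [{'text': text}]
    response = requests.post(url, headers=headers, json=body)
    response.raise_for_status()
    # One result per input object, each with a list of translations.
    return response.json()[0]['translations'][0]['text']

# e.g. replace translator.translate(content, src="ms", dest="en").text with:
# content = bing_translate(content, src="ms", dest="en")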
Hey, I made a program that takes advantage of a JSON API response in Ruby and I'd like to port it to Python, but I don't really know how.
JSON response:
{
    "Class": {
        "Id": 1948237,
        "family": "nature",
        "Timestamp": 941439
    },
    "Subtitles": [
        {
            "Id": 151398,
            "Content": "Tree",
            "Language": "en"
        },
        {
            "Id": 151399,
            "Content": "Bush",
            "Language": "en"
        }
    ]
}
And here's the Ruby code:
def get_word
  r = HTTParty.get('https://example.com/api/new')
  # Check if the request had a valid response.
  if r.code == 200
    json = r.parsed_response
    # Extract the family and timestamp from the API response.
    _, family, timestamp = json["Class"].values
    # Build a proper URL
    image_url = "https://example.com/image/" + family + "/" + timestamp.to_s
    # Combine each line of subtitles into one string, separated by newlines.
    word = json["Subtitles"].map{|subtitle| subtitle["Content"]}.join("\n")
    return image_url, word
  end
end
Is there any way I could port this code to Python using the requests and maybe json modules?
I tried but failed miserably.
Per request, here is what I've already tried:
def get_word():
    r = requests.request('GET', 'https://example.com/api/new')
    if r.status_code == 200:
        # ![DOESN'T WORK]! Extract the family and timestamp from the API
        json = requests.Response
        _, family, timestamp = json["Class"].values
        # Build a proper URL
        image_url = "https://example.com/image/" + family + "/" + timestamp
        # Combine each line of subtitles into one string, separated by newlines.
        word = "\n".join(subtitle["Content"] for subtitle in json["Subtitles"])
        print(image_url + '\n' + word)

get_word()
The response handling and the _, family, timestamp = json["Class"].values line don't work, as I don't know how to port them.
If you're using the requests module, you can call requests.get() to make a GET call, and then use .json() to get the JSON response. Also, you shouldn't be using json as a variable name if you're importing the json module.
Try making the following changes in your function:
def get_word():
    r = requests.get("https://example.com/api/new")
    if r.status_code == 200:
        # Extract the family and timestamp from the API
        json_response = r.json()
        # json_response will now be a dictionary that you can simply use
        ...
And use the json_response dictionary to get anything you need for your variables.
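Putting it all together, a sketch of the full port (assuming the response shape shown in the question) could look like this:

import requests

def get_word():
    r = requests.get("https://example.com/api/new")
    # Check if the request had a valid response.
    if r.status_code == 200:
        json_response = r.json()
        # Extract the family and timestamp from the "Class" object.
        family = json_response["Class"]["family"]
        timestamp = json_response["Class"]["Timestamp"]
        # Build a proper URL.
        image_url = "https://example.com/image/" + family + "/" + str(timestamp)
        # Combine each line of subtitles into one string, separated by newlines.
        word = "\n".join(subtitle["Content"] for subtitle in json_response["Subtitles"])
        return image_url, word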