python handling incoming from url - python

i'm sending below request to URL and get the response from it
import requests
url = "http://localhost/dat.txt"
payload = {}
headers = {
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
'Sec-Fetch-Dest': 'document',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
}
response = requests.request("GET", url, headers=headers, data = payload)
print(response.text.encode('utf8'))
Below is the response data that I get -
mohame4|nameon#example.com|passsd!##$4|head,customer|manager,devlop
mohame3|nameon3#example.com|passsd!##$4|head,customer|manager,devlop
I do this with the data
for i in response.text:
try:
i = i.strip().split('|')
userna = i[0]
emaill = i[1]
passd = i[2]
rol1= i[3]
rol2= i[4]
except:
pass
How can I make rol1 as
this head,customer
to
rol1=['head','customer']

Simply split the string you're getting:
rol1 = i[3].split(',')
You could do this more... gracefully, though, using iterable unpacking:
username, email, password, rol1, rol2 = i.strip().split('|')
rol1 = rol1.split(',')

thanks for all helper special #ForceBru
import requests
url = "http://localhost/dat.txt"
response = requests.request("GET", url)
print(response.text)
dat = str(response.text).split('\n')
for i in dat:
i = i.strip().split('|')
print(i[3].split(","))
# TODO: write code...

Related

Python request cookies setting does not work

I want to crawl the data from this website:'http://www.stcn.com/article/search.html?search_type=all&page_time=1', but the website needs to have cookies on the homepage first, so I first get the cookies he needs from this website('http://www.stcn.com/article/search.html') and set them into the request, but it doesn't work after many attempts.
My code looks like this:
import requests
headers = {
'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36','Host':'www.stcn.com'}
def _getStcnCookie(keyWords='all'):
url = "http://www.stcn.com/article/search.html"
data = {'keyword': keyWords}
r = requests.get(url, data, headers=headers, timeout=10)
if r.status_code != 200:
return None
return requests.utils.dict_from_cookiejar(r.cookies)
def searchStcnData(url,keyWords) :
myHeader = dict.copy(headers)
myHeader['X-Requested-With'] = 'XMLHttpRequest'
cookies = _getStcnCookie(keyWords=keyWords)
print(cookies)
jar = requests.cookies.cookiejar_from_dict(cookies)
data = {'keyword':'Paxlovid', 'page_time': 1, 'search_type': 'all'}
#Option One
s = requests.Session()
response = s.post(url, data, headers=myHeader, timeout=5, cookies=cookies)
print(response.text)
# Option two
# myHeader['Cookie'] = 'advanced-stcn_web=potef1789mm5nqgmd6jc1rcih3; path=/; HttpOnly;'+cookiesStr
# Option three
r = requests.post(url, data, headers=myHeader, timeout=5, cookies=cookies)
print(r.json())
return r.json()
searchStcnData('http://www.stcn.com/article/search.html?search_type=all&page_time=1','Paxlovid')
I've tried options 1, 2, and 3 to no avail.
I set cookies in Postman, and only set 'advanced-stcn_web=5sdfitvu42qggmnjvop4dearj4' can get the data, like this :
{
"state": 1,
"msg": "操作成功",
"data": "<li class=\"\">\n <div class=\"content\">\n <div class=\"tt\">\n <a href=\"/article/detail/769123.html\" target=\"_blank\">\n ......
"page_time": 2
}

How to change Json data output in table format

import requests
from pprint import pprint
import pandas as pd
baseurl = "https://www.nseindia.com/"
url = f'https://www.nseindia.com/api/live-analysis-oi-spurts-underlyings'
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, '
'like Gecko) '
'Chrome/80.0.3987.149 Safari/537.36',
'accept-language': 'en,gu;q=0.9,hi;q=0.8', 'accept-encoding': 'gzip, deflate, br'}
session = requests.Session()
request = session.get(baseurl, headers=headers, timeout=30)
cookies = dict(request.cookies)
res = session.get(url, headers=headers, timeout=30, cookies=cookies)
print(res.json())
I tried df = pd.DataFrame(res.json()) but couldn't get data in table format. How to do that Plz. Also how to select few particular columns only in data output instead of all columns.
Try this :
import json
import codecs
df = pd.DataFrame(json.loads(codecs.decode(bytes(res.text, 'utf-8'), 'utf-8-sig'))['data'])
And to select a specific columns, you can use :
mini_df = df[['symbol', 'latestOI', 'prevOI', 'changeInOI', 'avgInOI']]
>>> print(mini_df)

problem with coinex futures api signature and authorization in python

when I whant to send request to coinex futures api I get authorization fail.can you send me the correct code for python3.9?
coinex docs:
Authorization
The authorization process is as follows
The input parameter string of the http message is as follows:
market=BTCUSD&type=buy&price=680&amount=1.0&timestamp=1550743431000
Paste the secret_key to the end of the above string as:
market=BTCUSD&type=buy&price=680&amount=1.0&timestamp=1550743431000&secret_key=B51068CF10B34E7789C374AB932696A05E0A629BE7BFC62F
Note: secret_key parameter is not required to send the http message body, this step is just for calculating the sha256 signature.
Perform sha256 on the above string, convert it to hexadecimal lowercase, and the length is 64 bits, and then add this signature to the http header as follows:
Authorization: a174066d9ccbeb33803c2a84e20792d31bed5a6e3da8fca23e38fc8dbb917a13
Add AccessId in the http header, and the server will look for the corresponding user information according to AccessId: 4DA36FFC61334695A66F8D29020EB589
After receiving the http message, the server finds the user’s secret key according to the AccessId, and performs the same operation as above to determine whether the received signature is equal to the signature calculated by itself. If they are equal, the authorization succeeds, otherwise it fails.
import time
import hashlib
import requests
access_id = '5#######################8'
secret_key = 'C##########################################7'
base_url = 'https://api.coinex.com/perpetual/v1'
def get_sign(params,secret_key):
data = []
for item in params:
data.append(item + '=' + str(params[item]))
str_params = "{0}&secret_key={1}".format('&'.join(data), secret_key)
token = hashlib.sha256(str_params.encode()).hexdigest().lower()
return token
def Adjust_Leverage():
header = {
'Content-Type': 'application/json; charset=utf-8',
'Accept': 'application/json',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36'
}
timestamp = int(time.time()*1000)
params = {
'market': 'BTCUSDT',
'leverage':'10',
'position_type':1,
'timestamp':timestamp}
header['Authorization'] = get_sign(params , secret_key)
header['AccessId'] = access_id
res = requests.post(
url=f'{base_url}/market/adjust_leverage',
headers=header,
json=params
)
return res.text
def Market_Order():
header = {
'Content-Type': 'application/json; charset=utf-8',
'Accept': 'application/json',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36'
}
timestamp = int(time.time()*1000)
params = {
'market': 'BTCUSDT',
'side':1,
'amount':'10',
'timestamp':timestamp}
header['authorization'] = get_sign(params , secret_key)
header['AccessId'] = access_id
res = requests.post(
url=f'{base_url}/order/put_market',
headers=header,
json=params
)
return res.text
print(Adjust_Leverage())
I use below code but I got authorization fail again:
def get_sign(params, secret_key):
data = ['='.join([str(k), str(v)]) for k, v in params.items()]
str_params = "{0}&secret_key={1}".format(
'&'.join(data), secret_key).encode()
token = hashlib.sha256(str_params).hexdigest()
return token

Obtain data of Freight Index in python

I am trying to get the data from this website, https://en.macromicro.me/charts/947/commodity-ccfi-scfi , for China and Shanghai Continerized Freight Index.
I understand that the data is called from an API, how do I find out how the call is made and how do I extract it using python?
I am new in html in general so I have no idea where to start.
I tried,
import requests
url = "https://en.macromicro.me/charts/data/947/commodity-ccfi-scfi"
resp = requests.get(url)
resp = resp.json()
But the response is <Response [404]>
If I change the url to https://en.macromicro.me/charts/data/947/
the response is {'success': 0, 'data': [], 'msg': 'error #644'}
Try the below
import requests
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36',
'Referer': 'https://en.macromicro.me/charts/947/commodity-ccfi-scfi',
'X-Requested-With': 'XMLHttpRequest',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Authorization': 'Bearer 9633cefba71a598adae0fde0b56878fe',
'Cookie': 'PHPSESSID=n74gv10hc8po7mrj491rk4sgo1; _ga=GA1.2.1231997091.1631627585; _gid=GA1.2.1656837390.1631627585; _gat=1; _hjid=c52244fd-b912-4d53-b0e3-3f11f430b51c; _hjFirstSeen=1; _hjAbsoluteSessionInProgress=0'}
r = requests.get('https://en.macromicro.me/charts/data/947', headers=headers)
print(r.json())
output
{'success': 1, 'data': {' ...}

Yandex Spellchecker API Returns Empty Array

I am trying to harness a Russian language spellcheck API, Yandex.Speller.
The request seems to work fine in my browser. However, when I use a python script, the response is empty.
I am stumped as to what I am doing wrong.
Here is my code:
import urllib
from urllib.request import urlopen
import json
def main():
api(text_preproc())
def text_preproc():
""" Takes misspelled word/phrase,
“t”, and prepares it for
API request
"""
t = "синхрафазатрон в дубне"
text = t.replace(" ", "+")
return text
def diff_api(text):
my_url = "https://speller.yandex.net/services/spellservice.json/checkText?text="
my_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}
my_data = {
"text" : text,
"lang" : "ru",
"format" : "plain"}
my_uedata = urllib.parse.urlencode(my_data)
my_edata = my_uedata.encode('ascii')
req = urllib.request.Request(url=my_url, data=my_edata, headers=my_headers)
response = urlopen(req)
data = json.load(response)
print(data)
The response is always an empty array, no matter how I tinker with my request.
Any insight into what I might be doing wrong?
my_uedata has to be a part of the URL you send the request to.
Also, in:
def main():
api(text_preproc())
You call api() but the function is not defined. I've used diff_api().
Try this:
import json
import urllib
from urllib.request import urlopen
def main():
diff_api(text_preproc("синхрафазатрон в дубне"))
def text_preproc(phrase):
""" Takes misspelled word/phrase,
“t”, and prepares it for
API request
"""
return phrase.replace(" ", "+")
def diff_api(text):
my_url = "https://speller.yandex.net/services/spellservice.json/checkText?text="
my_headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}
my_data = {
"text": text,
"lang": "ru",
"format": "plain"}
my_uedata = urllib.parse.urlencode(my_data)
req = urllib.request.Request(url=my_url+my_uedata, headers=my_headers)
data = json.load(urlopen(req))
print(data)
main()
Output:
[{'code': 1, 'pos': 5, 'row': 0, 'col': 5, 'len': 14, 'word': 'синхрафазатрон', 's': ['синхрофазотрон', 'синхрофазатрон', 'синхрофазотрона']}]

Categories

Resources