Assign text list to variable python - python

I have a python script which allows me to check if a number is used on telegram or not.
I try to change the variable "phone_number" to a .txt list which basically contain phone number (There is one phone number per line)
I want the script to take a phone number from the file.txt check if it exists or not then move on to the next one and so on until all the numbers are checked.
This is what i try so far...
import random
from telethon import TelegramClient
from telethon import functions, types
import ast
api_id = XXXXX
api_hash = 'XXXXXXXXXXXXXXXXXX'
client = TelegramClient('session', api_id, api_hash)
async def main():
phone_in = []
with open('file.txt', 'r') as f:
phone_str = f.readline()
phone_in.append(ast.literal_eval(phone_str))
result = await client(functions.contacts.ImportContactsRequest(
contacts=[types.InputPhoneContact(
client_id=random.randrange(-2**63, 2**63),
phone=phone_in,
first_name='Some Name',
last_name=''
)]
))
if len(result.users):
print(f"{phone_in} has a telegram account")
await client(functions.contacts.DeleteContactsRequest(result.users))
else:
print(f"couldn't find an account for {phone_in}")
client.start()
client.loop.run_until_complete(main())
I tried this but I had an error which is the following :
Traceback (most recent call last):
File "/Users/me/phone.py", line 33, in <module>
client.loop.run_until_complete(main())
File "/usr/local/Cellar/python#3.9/3.9.1_7/Frameworks/Python.framework/Versions/3.9/lib/python3.9/asyncio/base_events.py", line 642, in run_until_complete
return future.result()
File "/Users/me/phone.py", line 17, in main
result = await client(functions.contacts.ImportContactsRequest(
File "/usr/local/lib/python3.9/site-packages/telethon/client/users.py", line 30, in __call__
return await self._call(self._sender, request, ordered=ordered)
File "/usr/local/lib/python3.9/site-packages/telethon/client/users.py", line 58, in _call
future = sender.send(request, ordered=ordered)
File "/usr/local/lib/python3.9/site-packages/telethon/network/mtprotosender.py", line 174, in send
state = RequestState(request)
File "/usr/local/lib/python3.9/site-packages/telethon/network/requeststate.py", line 17, in __init__
self.data = bytes(request)
File "/usr/local/lib/python3.9/site-packages/telethon/tl/tlobject.py", line 194, in __bytes__
return self._bytes()
File "/usr/local/lib/python3.9/site-packages/telethon/tl/functions/contacts.py", line 498, in _bytes
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.contacts)),b''.join(x._bytes() for x in self.contacts),
File "/usr/local/lib/python3.9/site-packages/telethon/tl/functions/contacts.py", line 498, in <genexpr>
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.contacts)),b''.join(x._bytes() for x in self.contacts),
File "/usr/local/lib/python3.9/site-packages/telethon/tl/types/__init__.py", line 9789, in _bytes
self.serialize_bytes(self.phone),
File "/usr/local/lib/python3.9/site-packages/telethon/tl/tlobject.py", line 112, in serialize_bytes
raise TypeError(
TypeError: bytes or str expected, not <class 'list'>
Here is the same code but the phone number to check is "hardcoded"
import random
from telethon import TelegramClient
from telethon import functions, types
api_id = XXXXXXX
api_hash = 'XXXXXXXXXXXXXXXXX'
client = TelegramClient('session', api_id, api_hash)
async def main():
phone_number = '+XXXXXXXXX'
result = await client(functions.contacts.ImportContactsRequest(
contacts=[types.InputPhoneContact(
client_id=random.randrange(-2**63, 2**63),
phone=phone_number,
first_name='Some Name',
last_name=''
)]
))
if len(result.users):
print(f"{phone_number} has a telegram account")
await client(functions.contacts.DeleteContactsRequest(result.users))
else:
print(f"couldn't find an account for {phone_number}")
client.start()
client.loop.run_until_complete(main())
Does anyone know how I can assign the file.txt to the phone_in variable?

If ImportContactsRequests expects one phone number at a time, then you have to call it for each phone number. That will create multiple records for a single name, but if the API doesn't allow multiple phone numbers per person, you'll have to decide how to handle it.
with open('file.txt', 'r') as f:
phone_str = f.readline()
result = await client(functions.contacts.ImportContactsRequest(
contacts=[types.InputPhoneContact(
client_id=random.randrange(-2**63, 2**63),
phone=phone_str,
first_name='Some Name',
last_name=''
)]
))
if len(result.users):
print(f"{phone_number} has a telegram account")
await client(functions.contacts.DeleteContactsRequest(result.users))
else:
print(f"couldn't find an account for {phone_number}")

According to the doc of InputPhoneContact, the phone argument takes string type not list. So you could read all phones in the file.txt first, then loop through the list.
async def main():
phone_in = []
with open('file.txt', 'r') as f:
phone_str = f.readline()
phone_in.append(ast.literal_eval(phone_str))
for phone in phone_in:
result = await client(functions.contacts.ImportContactsRequest(
contacts=[types.InputPhoneContact(
client_id=random.randrange(-2**63, 2**63),
phone=phone,
first_name='Some Name',
last_name=''
)]
))
if len(result.users):
print(f"{phone} has a telegram account")
await client(functions.contacts.DeleteContactsRequest(result.users))
else:
print(f"couldn't find an account for {phone}")

python does allow for creation of heterogeneous lists, so not sure why this is throwing an error. Depending on the version in use, maybe there is a constraint on the type homogeneity in the list.I'm not sure though...but curious to know if the following works?
Can you try with a small version of the file in which the numbers are of the same 'type'?
Alternately, can try with a x.strip("[/{(") before appending it to phone_in.

Related

Why does my second python async (scraping) function (which uses results from the first async (scraping) function) return no result?

Summary of what the program should do:
Step 1 (sync): Determine exactly how many pages need to be scraped.
Step 2 (sync): create the links to the pages to be scraped in a for-loop.
Step 3 (async): Use the link list from step 2 to get the links to the desired detail pages from each of these pages.
Step 4 (async): Use the result from step 3 to extract the detail information for each hofladen. This information is stored in a list for each farm store and each of these lists is appended to a global list.
Where do I have the problem?
The transition from step 3 to step 4 does not seem to work properly.
Traceback (most recent call last):
File "/Users/REPLACED_MY_USER/PycharmProjects/PKI-Projekt/test_ttt.py", line 108, in <module>
asyncio.run(main())
File "/Users/REPLACED_MY_USER/miniconda3/envs/scrapy/lib/python3.10/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/Users/REPLACED_MY_USER/miniconda3/envs/scrapy/lib/python3.10/asyncio/base_events.py", line 649, in run_until_complete
return future.result()
File "/Users/REPLACED_MY_USER/PycharmProjects/PKI-Projekt/test_ttt.py", line 96, in main
await asyncio.gather(*tasks_detail_infos)
File "/Users/REPLACED_MY_USER/PycharmProjects/PKI-Projekt/test_ttt.py", line 61, in scrape_detail_infos
data = JsonLdExtractor().extract(body_d)
File "/Users/REPLACED_MY_USER/miniconda3/envs/scrapy/lib/python3.10/site-packages/extruct/jsonld.py", line 21, in extract
tree = parse_html(htmlstring, encoding=encoding)
File "/Users/REPLACED_MY_USER/miniconda3/envs/scrapy/lib/python3.10/site-packages/extruct/utils.py", line 10, in parse_html
return lxml.html.fromstring(html, parser=parser)
File "/Users/REPLACED_MY_USER/miniconda3/envs/scrapy/lib/python3.10/site-packages/lxml/html/__init__.py", line 873, in fromstring
doc = document_fromstring(html, parser=parser, base_url=base_url, **kw)
File "/Users/REPLACED_MY_USER/miniconda3/envs/scrapy/lib/python3.10/site-packages/lxml/html/__init__.py", line 761, in document_fromstring
raise etree.ParserError(
lxml.etree.ParserError: Document is empty
Process finished with exit code 1
What did I do to isolate the problem?
In a first attempt I rewrote the async function append_detail_infos so that it no longer tries to create a list and append the values but only prints data[0]["name"].
This resulted in the error message
Traceback (most recent call last):
File "/Users/REPLACED_MY_USER/PycharmProjects/PKI-Projekt/test_ttt.py", line 108, in <module>
asyncio.run(main())
File "/Users/REPLACED_MY_USER/miniconda3/envs/scrapy/lib/python3.10/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/Users/REPLACED_MY_USER/miniconda3/envs/scrapy/lib/python3.10/asyncio/base_events.py", line 649, in run_until_complete
return future.result()
File "/Users/REPLACED_MY_USER/PycharmProjects/PKI-Projekt/test_ttt.py", line 96, in main
await asyncio.gather(*tasks_detail_infos)
File "/Users/REPLACED_MY_USER/PycharmProjects/PKI-Projekt/test_ttt.py", line 61, in scrape_detail_infos
data = JsonLdExtractor().extract(body_d)
File "/Users/REPLACED_MY_USER/miniconda3/envs/scrapy/lib/python3.10/site-packages/extruct/jsonld.py", line 21, in extract
tree = parse_html(htmlstring, encoding=encoding)
File "/Users/REPLACED_MY_USER/miniconda3/envs/scrapy/lib/python3.10/site-packages/extruct/utils.py", line 10, in parse_html
return lxml.html.fromstring(html, parser=parser)
File "/Users/REPLACED_MY_USER/miniconda3/envs/scrapy/lib/python3.10/site-packages/lxml/html/__init__.py", line 873, in fromstring
doc = document_fromstring(html, parser=parser, base_url=base_url, **kw)
File "/Users/REPLACED_MY_USER/miniconda3/envs/scrapy/lib/python3.10/site-packages/lxml/html/__init__.py", line 761, in document_fromstring
raise etree.ParserError(
lxml.etree.ParserError: Document is empty
Process finished with exit code 1
In the next attempt, I exported the links from detail_links as .csv and visually checked them and opened some of them to see if they were valid. This was also the case.
The program code:
import asyncio
import time
import aiohttp
import requests
import re
from selectolax.parser import HTMLParser
from extruct.jsonld import JsonLdExtractor
import pandas as pd
BASE_URL = "https://hofladen.info"
FIRST_PAGE = 1
def get_last_page(url: str) -> int:
res = requests.get(url).text
html = HTMLParser(res)
last_page = int(re.findall("(\d+)", html.css("li.page-last > a")[0].attributes["href"])[0])
return last_page
def build_links_to_pages(start: int, ende: int) -> list:
lst = []
for i in range(start, ende + 1):
url = f"https://hofladen.info/regionale-produkte?page={i}"
lst.append(url)
return lst
async def scrape_detail_links(url: str):
async with aiohttp.ClientSession() as session:
async with session.get(url, allow_redirects=True) as resp:
body = await resp.text()
html = HTMLParser(body)
for node in html.css(".sp13"):
detail_link = BASE_URL + node.attributes["href"]
detail_links.append(detail_link)
async def append_detail_infos(data):
my_detail_lst = []
# print(data[0]["name"]) # name for debugging purpose
my_detail_lst.append(data[0]["name"]) # name
my_detail_lst.append(data[0]["address"]["streetAddress"]) # str
my_detail_lst.append(data[0]["address"]["postalCode"]) # plz
my_detail_lst.append(data[0]["address"]["addressLocality"]) # ort
my_detail_lst.append(data[0]["address"]["addressRegion"]) # bundesland
my_detail_lst.append(data[0]["address"]["addressCountry"]) # land
my_detail_lst.append(data[0]["geo"]["latitude"]) # breitengrad
my_detail_lst.append(data[0]["geo"]["longitude"]) # längengrad
detail_infos.append(my_detail_lst)
async def scrape_detail_infos(detail_link: str):
async with aiohttp.ClientSession() as session_detailinfos:
async with session_detailinfos.get(detail_link) as res_d:
body_d = await res_d.text()
data = JsonLdExtractor().extract(body_d)
await append_detail_infos(data)
async def main() -> None:
start_time = time.perf_counter()
# Beginn individueller code
# ----------
global detail_links, detail_infos
detail_links, detail_infos = [], []
tasks = []
tasks_detail_infos = []
# extrahiere die letzte zu iterierende Seite
last_page = get_last_page("https://hofladen.info/regionale-produkte")
# scrape detail links
links_to_pages = build_links_to_pages(FIRST_PAGE, last_page)
for link in links_to_pages:
task = asyncio.create_task(scrape_detail_links(link))
tasks.append(task)
print("Saving the output of extracted information.")
await asyncio.gather(*tasks)
pd.DataFrame(data=detail_links).to_csv("detail_links.csv")
# scrape detail infos
for detail_url in detail_links:
task_detail_infos = asyncio.create_task(scrape_detail_infos(detail_url))
tasks_detail_infos.append(task_detail_infos)
await asyncio.gather(*tasks_detail_infos)
# Ende individueller Code
# ------------
time_difference = time.perf_counter() - start_time
print(f"Scraping time: {time_difference} seconds.")
print(len(detail_links))
# print(detail_infos[])
asyncio.run(main())
A working solution to the problem:
added python allow_redirects=True to python async with session_detailinfos.get(detail_link, allow_redirects=True) as res_d:
added python return_exceptions=True to python await asyncio.gather(*tasks_detail_infos, return_exceptions=True)
A working solution to the problem:
added
python allow_redirects=True to python async with session_detailinfos.get(detail_link, allow_redirects=True) as res_d:
added python return_exceptions=True to python await asyncio.gather(*tasks_detail_infos, return_exceptions=True)

I am trying to create a server stats counter that updates automatically

I will start off with i have tried and retried everything i can think of, i just need more assistance, or formal training i guess. Anyways here's my issue. I have the initial part working where it creates everything, the struggle i am having is the following error:
Unhandled exception in internal background task 'update_stats'.
Traceback (most recent call last):
File "C:\Users\Richard Ille\AppData\Local\Programs\Python\Python39\lib\site-packages\discord\ext\tasks\__init__.py", line 101, in _loop
await self.coro(*args, **kwargs)
File "C:\Users\Richard Ille\Desktop\Yota Inc discord bot\Yota Inc Bot.py", line 1484, in update_stats
chan1a = chan1[str(guild.id)]
KeyError: '626094990984216586'
I will include my code here as well, however i have spent a great deal of time googling to no avail. This is just the task.loop portion but it is the portion that does not work. I did have it working as a command but obviously that is not an idea solution.
#tasks.loop(seconds=600)
async def update_stats():
for guild in client.guilds:
allmem = guild.member_count
mem = len([m for m in guild.members if not m.bot])
bots = allmem - mem
boosts = guild.premium_subscription_count
with open('allmem.json', 'r') as fp:
chan1 = json.load(fp)
#print(f'chan1: {chan1}')
chan1a = chan1[str(guild.id)]
#print(f'chan1a: {chan1a}')
with open('mem.json', 'r') as fp:
chan2 = json.load(fp)
#print(f'chan2: {chan2}')
chan2a = chan2[str(guild.id)]
#print(f'chan2a: {chan2a}')
with open('bots.json', 'r') as fp:
chan3 = json.load(fp)
#print(f'chan3: {chan3}')
chan3a = chan3[str(guild.id)]
#print(f'chan3a: {chan3a}')
with open('boosts.json', 'r') as fp:
chan4 = json.load(fp)
#print(f'chan4: {chan4}')
chan4a = chan4[str(guild.id)]
#print(f'chan4a: {chan4a}')
#if chan1a:
channel = client.get_channel(chan1a)
print(f'channel: {channel}')
await channel.edit(name=f'All Members: {allmem}')
#await ctx.send('updated')
#if chan2a:
channel2 = client.get_channel(chan2a)
print(f'channel2: {channel2}')
await channel.edit(name=f'Members: {mem}')
#if chan3a:
channel3 = client.get_channel(chan3a)
print(f'channel3: {channel3}')
await channel.edit(name=f'Bots: {bots}')
#await ctx.send('updated')
#if chan4a:
channel4 = client.get_channel(chan4a)
print(f'channel4: {channel4}')
await channel.edit(name=f'Boosts: {boosts}')
update_stats.start()
Check to see if your key exists in your dictionary before trying to access it or it will cause this.
if str(guild.id) in chan1:
#do work

TypeError: 'NoneType' object is not iterable - SQLite and discord.py

The function will check if the channel with the reaction is a private channel between bot and user and then do other things.
The code:
#bot.event
async def on_raw_reaction_add(payload):
channel = bot.get_channel(payload.channel_id)
msg = await channel.fetch_message(payload.message_id)
emoji = payload.emoji
author = payload.member
if emoji.is_custom_emoji():
emoji_count = discord.utils.get(msg.reactions, emoji=emoji).count
else:
emoji_count = discord.utils.get(msg.reactions, emoji = emoji.name).count
cur.execute(f"SELECT discord_user_dmchannel_id FROM users WHERE discord_user_id = \
{int(payload.user_id)};")
print(cur.fetchone())
channel_dm_id_list = list(cur.fetchone())
channel_dm_id = channel_dm_id_list[0]
if payload.channel_id == channel_dm_id:
if int(emoji_count) > 1:
if emoji = ...
The output:
(782664385889959976,)
Ignoring exception in on_raw_reaction_add
Traceback (most recent call last):
File "C:\Users\plays\AppData\Local\Programs\Python\Python38-32\lib\site-packages\discord\client.py",
line 312, in _run_event
await coro(*args, **kwargs)
File "C:\Users\plays\OneDrive\Рабочий стол\Python\bot2.py", line 130, in on_raw_reaction_add
channel_dm_id_list = list(cur.fetchone())
TypeError: 'NoneType' object is not iterable
The table columns:
users(
discord_user_id INT PRIMARY KEY,
discord_user_dmchannel_id INT,
discord_user_name TEXT,
...
...);
fetchone() is returning the next row.
In the line print(cur.fetchone()) you are already getting the first row. In the next line channel_dm_id_list = list(cur.fetchone()) you are trying to get the second value. But since there is no second value, the method returns None which is causing your error. So either remove the print statement or store the first result like that:
channel_dm_id_list = list(cur.fetchone())
print(channel_dm_id_list)

Telegram Telethon : How to get messages if you know channel hash

When I have to deal with that kind of link : https://t.me/channelName
I usually get message from channel like this :
async def main():
channel = await client.get_entity('channelName')
messages = await client.get_messages(channel, limit = N )
#...Code etc..
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
How do you get message if you link is like this ?
https://t.me/joinchat/DDDDDxxxxAAAA
I know that 'DDDDDxxxxAAAA' is the channel_hash, so I joined the channel by telegram client and I tried get channel id and message object :
channel_hash = "DDDDDxxxxAAAA"
channel = await client(functions.messages.CheckChatInviteRequest(hash=channel_hash))
Now , channel object containes channel ID and many others fields.
example : (chat=Channel(id=123456789,...etcc
So , I tried like this:
messages = await client.get_messages(channel, limit = N )
but it returns :
Traceback (most recent call last):
File "C:\Users\****\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.9_qbz5n2kfra8p0\LocalCache\local-packages\Python39\site-packages\telethon\sessions\memory.py", line 192, in get_input_entity
return utils.get_input_peer(key)
File "C:\Users\****\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.9_qbz5n2kfra8p0\LocalCache\local-packages\Python39\site-packages\telethon\utils.py", line 235, in get_input_peer
_raise_cast_fail(entity, 'InputPeer')
File "C:\Users\****\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.9_qbz5n2kfra8p0\LocalCache\local-packages\Python39\site-packages\telethon\utils.py", line 138, in _raise_cast_fail
raise TypeError('Cannot cast {} to any kind of {}.'.format(
TypeError: Cannot cast ChatInviteAlready to any kind of InputPeer.
TypeError: Cannot cast ChatInviteAlready to any kind of InputPeer.
How to get messages if you only know channel hash ?
Thanks for help

Creating private Vc discord.py

I am trying to create a private vc for a function i am trying to make. it makes the vc with the permissions i want it to make, but then it gives an error after it made it and i cannot put any code behind it.
This is the code:
guild = ctx.author.guild
players_role = await guild.create_role(name=f"Players {game_id}")
for i in all_players:
print(all_players)
print(i)
i = guild.get_member(int(i[1]))
await i.add_roles(players_role)
overwrites = {
guild.default_role: discord.PermissionOverwrite(view_channel=False),
players_role: discord.PermissionOverwrite(view_channel=True)
}
players_vc = await guild.create_voice_channel(f"Game: {game_id}", overwrites=overwrites)
await players_vc.edit(position=len(ctx.guild.voice_channels))
print("edited position")
What I am trying to do is edit the vc I created so that it gets pulled to the bottom, but it doesnt do this, because it gives an error in the previous line. I tried putting the players_vc = await guild.create_voice_channel(f"Game: {game_id}", overwrites=overwrites) in try: and except: but I need the variable to edit the position. here is the error message:
Traceback (most recent call last):
File "C:\Users\Gebruiker\AppData\Local\Programs\Python\Python37-32\lib\site-packages\discord\ext\commands\core.py", line 83, in wrapped
ret = await coro(*args, **kwargs)
File "D:\Thijs coding map\TeaBot\TeaBot.py", line 889, in vc_test
test_vc = await guild.create_voice_channel(f"test vc", overwrites=overwrites)
File "C:\Users\Gebruiker\AppData\Local\Programs\Python\Python37-32\lib\site-packages\discord\guild.py", line 888, in create_voice_channel
channel = VoiceChannel(state=self._state, guild=self, data=data)
File "C:\Users\Gebruiker\AppData\Local\Programs\Python\Python37-32\lib\site-packages\discord\channel.py", line 553, in __init__
self._update(guild, data)
File "C:\Users\Gebruiker\AppData\Local\Programs\Python\Python37-32\lib\site-packages\discord\channel.py", line 584, in _update
self._fill_overwrites(data)
File "C:\Users\Gebruiker\AppData\Local\Programs\Python\Python37-32\lib\site-packages\discord\abc.py", line 294, in _fill_overwrites
self._overwrites.append(_Overwrites(id=overridden_id, **overridden))
TypeError: __new__() got an unexpected keyword argument 'allow_new'
for troubleshooting, you can use this code:
#client.command()
async def vc_test(ctx):
guild = ctx.guild
user = ctx.author
overwrites = {
guild.default_role: discord.PermissionOverwrite(view_channel=False),
guild.me: discord.PermissionOverwrite(view_channel=True)
}
test_vc = await guild.create_voice_channel(f"test vc", overwrites=overwrites)
await test_vc.edit(position=len(guild.voice_channels))
print("edited position")
I couldn't find this issue elsewhere
all help is appreciated
solution:
venv\lib\site-package\discord\abc.py
open and find
_Overwrites = namedtuple('_Overwrites', 'id allow deny type')
and replace
class _Overwrites:
__slots__ = ('id', 'allow', 'deny', 'type')
def __init__(self, **kwargs):
self.id = kwargs.pop('id')
self.allow = kwargs.pop('allow', 0)
self.deny = kwargs.pop('deny', 0)
self.type = kwargs.pop('type')
def _asdict(self):
return {
'id': self.id,
'allow': self.allow,
'deny': self.deny,
'type': self.type,
}
You can look here

Categories

Resources