Detect the end of a song using Spotipy - python

I'm using Spotipy and LyricsGenius to open lyrics in a web browser from a terminal.
I can open a URL for one song, but I have to rerun the script for each new song. What are some ways to detect the end of a song using Spotipy?
import spotipy
import webbrowser
import lyricsgenius as lg
...
# Create our spotifyObject
spotifyObject = spotipy.Spotify(auth=token)
# Create our geniusObject
geniusObject = lg.Genius(access_token)
...
while True:
    currently_playing = spotifyObject.currently_playing()
    artist = currently_playing['item']['artists'][0]['name']
    title = currently_playing['item']['name']
    search_query = artist + " " + title
    # if (currently_playing has changed):
    song = geniusObject.search_songs(search_query)
    song_url = song['hits'][0]['result']['url']
    webbrowser.open(song_url)
I read relevant threads such as this and this, and went through the documentation, but could not find an answer to whether this can be handled by Spotipy. I would appreciate any suggestions, thank you.

I used time.sleep(length), where length is the remaining duration of the current track.
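For reference, here is a minimal sketch of that approach, assuming the spotifyObject from the question. The remaining time is computed from the progress_ms and item.duration_ms fields that the Spotify currently-playing response provides:

import time

while True:
    currently_playing = spotifyObject.currently_playing()
    if currently_playing is None or currently_playing['item'] is None:
        time.sleep(5)  # nothing is playing; poll again shortly
        continue
    duration_ms = currently_playing['item']['duration_ms']
    progress_ms = currently_playing['progress_ms']
    remaining = (duration_ms - progress_ms) / 1000  # seconds left in the track
    # ... look up and open the lyrics for the current track here ...
    time.sleep(remaining + 1)  # wake up just after the track should have ended

Note that this drifts if the user skips or seeks within the track, so re-checking the current track periodically is safer than a single long sleep.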

Related

How do I make my reddit bot work at specified intervals?

Essentially, I've written a program for a reddit bot designed to list certain aspects of a reddit post, such as the title or poster, as long as they fit certain criteria. I want it to run automatically once every hour. I also want it to be able to make a post once every 7 days. Could someone share code for these please?
#!/usr/bin/python
import base64
import praw

#Enter your correct Reddit information into the variables below
userAgent = 'RRBot-Beta'
cID = 'Enter your so and so'
cSC = 'Enter your secret'
userN = 'Enter your Reddit username'
userP = 'Enter your Reddit password'

unfilled_post_URL = [""]
unfilled_post_url_B64 = [""]
submission_title_and_poster = {}
filled_requests = 0
unfilled_requests = 0
requests = 0

reddit = praw.Reddit(user_agent=userAgent,
                     client_id=cID,
                     client_secret=cSC,
                     username=userN,
                     password=userP)
subreddit = reddit.subreddit('riprequestsnew')  #any subreddit you want to monitor
title_keywords = {'requests', 'request'}  #makes a set of keywords to find in subreddits
comment_keyword = "share"

for submission in subreddit:
    lowercase_title = submission.title.lower()  #makes the post title lowercase so we can compare our keywords with it
    for title_keyword in title_keywords:  #goes through our keywords
        if title_keyword in lowercase_title:  #if one of our keywords matches a title in the subreddit
            requests = requests + 1  #tracks the number of requests
for post in requests:
    comments = subreddit.submission.comment.lower()  #makes the comment text lowercase
    if comment_keyword in comments:  #checks the comment text for our keyword
        filled_requests += 1  #if someone has shared something, this post will be marked as filled
    elif comment_keyword not in comments:  #if no one has shared anything, the post url will be added to a list
        submission_title_and_poster.update({subreddit.submission.title: subreddit.submission.name})
        unfilled_post_URL.append(subreddit.submission.url)
        for url in unfilled_post_URL:  #B64 encodes each url and adds it to a new list
            text = open(url, "rb")
            text_read = text.read()
            url_encoded = base64.encodestring(text_read)
            unfilled_post_url_B64.append(url_encoded)
            unfilled_requests += 1
Schedule (https://pypi.python.org/pypi/schedule) seems to be what you need.
You will have to install the Python library:
pip install schedule
then modify the sample script:
import schedule
import time

def job():
    print("I'm working...")

schedule.every(10).seconds.do(job)
schedule.every(10).minutes.do(job)
schedule.every().hour.do(job)
schedule.every().day.at("10:30").do(job)
schedule.every(5).to(10).minutes.do(job)
schedule.every().monday.do(job)
schedule.every().wednesday.at("13:15").do(job)
schedule.every().minute.at(":17").do(job)

while True:
    schedule.run_pending()
    time.sleep(1)
Put your own code in job() and keep the scheduling call that matches your timing.
Then you can run it with nohup.
Be advised that you will need to start it again if you reboot.
Here are the docs.
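Applied to your bot, a rough sketch might look like this, assuming you wrap the monitoring code above in a check_requests() function and the weekly submission in a make_post() function (both names are hypothetical):

import schedule
import time

def check_requests():
    ...  # the subreddit-scanning code from your script goes here

def make_post():
    ...  # build and submit the weekly post here

schedule.every().hour.do(check_requests)  # run once every hour
schedule.every(7).days.do(make_post)      # run once every 7 days

while True:
    schedule.run_pending()
    time.sleep(1)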

Telegram command starts whenever another person starts it

So I don't know how to properly ask this question, so it might seem kind of off; sorry about that.
I made a Telegram bot that gets some images from a website and sends them to your chat. However, when a user calls the command, the photos are also sent to the other users that have started the bot.
For instance, if User A calls the command to get the photos, the bot will send them to him as well as to User B, User C and User D, all stacking together as if it were a single call to everyone using the bot.
import requests
import os
from tqdm import tqdm
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin, urlparse
import re
import telebot

API_KEY = os.getenv("API_KEY")
bot = telebot.TeleBot(API_KEY)

url_mainpage = "https://url.com"
soup = bs(requests.get(url_mainpage).content, "html.parser")
full_link = soup.find("h5", class_="elementor-image-box-title")
selectlist = full_link.select(".elementor-image-box-title a")
for a in selectlist:
    global lastchapterlink
    lastchapterlink = a['href']

images = []
stripped_images = []

def download_last_chapter():
    soup = bs(requests.get(lastchapterlink).content, "html.parser")
    images_link = soup.findAll("img")
    for img in images_link:
        images.append(img.get("src"))
    for link in images:
        stripped_images.append(link.strip())
    print(stripped_images)

@bot.message_handler(commands=["last_chapter"])
def send_images(message):
    download_last_chapter()
    for telegram_send in stripped_images:
        try:
            bot.send_photo(message.chat.id, photo=telegram_send)
        except:
            None
bot.polling()
This is the part of the code containing the bot.
Per the API documentation, the bot will reply in whatever channel it sees the message in. Are your users DMing it, or posting in a shared channel that you're all part of? Also, you're not clearing stripped_images between calls; you're just appending the new images to it.
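A minimal sketch of a fix for the stale-list issue: build the image list inside the handler instead of appending to the module-level lists, so each command starts from scratch (reusing the names from the question):

def download_last_chapter():
    soup = bs(requests.get(lastchapterlink).content, "html.parser")
    # collect and strip the image sources for this call only
    return [img.get("src").strip() for img in soup.findAll("img") if img.get("src")]

@bot.message_handler(commands=["last_chapter"])
def send_images(message):
    for src in download_last_chapter():  # a fresh list on every command
        try:
            bot.send_photo(message.chat.id, photo=src)
        except Exception:
            pass  # skip any image Telegram rejects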

How can I re-check a web-scraped page with requests in real time (auto-update)? Python

I'm a young programmer and I have a question.
I have a script checking discount percentages on https://shadowpay.com/en?price_from=0.00&price_to=34.00&game=csgo&hot_deal=true, and I want it to update in real time.
Questions:
Is there a way to make it check in real time, or only by refreshing the page?
If refreshing the page: how can I make it refresh? I saw older answers, but they did not work for me because they only applied to the posters' own code.
(I tried calling requests.get() every time the while loop runs, but it doesn't work; or should it?)
This is the code:
import json
import requests
import time
import plyer
import random
import copy

min_notidication_perc = 26
un = 0
us = ""
biggest_number = 0

r = requests.get('https://api.shadowpay.com/api/market/get_items?types=[]&exteriors=[]&rarities=[]&collections=[]&item_subcategories=[]&float={"from":0,"to":1}&price_from=0.00&price_to=34.00&game=csgo&hot_deal=true&stickers=[]&count_stickers=[]&short_name=&search=&stack=false&sort=desc&sort_column=price_rate&limit=50&offset=0', timeout=3)
while True:
    #Here is the place where I'm thinking of putting it
    time.sleep(5)
    skin_list = []
    perc_list = []
    for i in range(len(r.json()["items"])):
        perc_list.append(r.json()["items"][i]["discount"])
        skin_list.append(r.json()["items"][i]["collection"]["name"])
    skin = skin_list[perc_list.index(max(perc_list))]
    print(skin)
    biggest_number = int(max(perc_list))
    if un != biggest_number or us != skin:
        if int(max(perc_list)) >= min_notidication_perc:
            plyer.notification.notify(
                title=f'-{int(max(perc_list))}% ShadowPay',
                message=f'{skin}',
                app_icon="C:\\Users\\<user__name>\\Downloads\\Inipagi-Job-Seeker-Target.ico",
                timeout=120,
            )
        else:
            pass
    else:
        pass
    us = skin
    un = biggest_number
    print(f'id: {random.randint(1, 99999999)}')
    print(f'-{int(max(perc_list))}% discount\n')
When you call requests.get() you retrieve the page source of that link once, and the connection is then closed. Since requests already blocks while waiting for the response, you don't need the time.sleep(5) line for the request itself.
To get the real-time value you will have to call the page again on every iteration; that is where time.sleep() is useful, so as not to abuse the API.
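In other words, move the request inside the loop. A minimal sketch, with the long query string from the question abbreviated here:

import time
import requests

API_URL = 'https://api.shadowpay.com/api/market/get_items?...'  # use the full query string from the question

while True:
    r = requests.get(API_URL, timeout=3)  # a fresh response on every iteration
    items = r.json()["items"]
    # ... find the biggest discount and notify, as in the question ...
    time.sleep(5)  # rate-limit the polling so you don't abuse the API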

Can't get a full playlist with pafy

I'm trying to get all the urls of the videos in a playlist.
The playlist contains 700+ videos.
When I create a playlist with pafy.get_playlist, it only creates a list with 194 videos, not all of them.
So is there a size limit for the playlist with pafy?
According to this issue on their GitHub page, you can just use get_playlist2().
Hello everyone, this is my first answer :)
You can use get_playlist2() as @JalxP said.
Just check out the code below and run it; I think it should solve the problem.
Thanks!
import pafy
import sys
import time

url = sys.argv[1]  # takes the playlist link as an argument
details = pafy.get_playlist(url)
playlist = pafy.get_playlist2(url)

# the three statements below print the title, author and number of videos
print(details['title'])
print(details['author'])
print(len(playlist))

# path to store the videos
userpath = input("enter path to save the playlist: ")

# you can modify this for loop for a particular range of videos;
# this bare-bones loop downloads every video in the playlist
for item in range(len(playlist)):
    url = playlist[item].getbest()
    url.download(userpath)
    time.sleep(3)  # just to avoid making immediate consecutive download calls
import os
from socket import gethostname
from pafy import get_playlist

name_pc = gethostname()
var2 = name_pc.split("-")[0]  # e.g. "document-PC" -> ["document", "PC"], to get the PC name
os.chdir(r"C:\Users\\" + var2 + "\\Downloads")
vobj = get_playlist(str(input("Enter a URL: ")))
down_path = str(input("Enter a Dir?: "))
for video in range(len(vobj['items'])):
    down = vobj['items'][video]['pafy'].getbest()  # gets the best stream for each item in the playlist
    if 1:
        t = r"C:\Users\\" + var2 + "\\Downloads"
        down.download(t)
    else:
        down.download(down_path)

Why won't my python program print the list?

import praw
import time
from selenium import webdriver

driver = webdriver.Chrome()
r = praw.Reddit(client_id='XXXXXXXXXX',
                client_secret='XXXXXXXXXXXXX', password='XXXXXXXXX',
                user_agent='Grand Street Tech', username='grandstreetsupreme')
subreddit = r.subreddit('supremeclothing')
submissions = []
users = []
for submission in r.subreddit('supremeclothing').new(limit=999):
    for comment in submission.comments:
        author = comment.author
        users.append(author)
It takes about 10 minutes to complete, and when it does, it doesn't do anything.
There is no print statement for the users, right? Put a statement like the one below:
print(users)
This is because you just created the list users; you need to tell Python to print it.
After your for loop, put print(users).
