chatroom quiz bot: How to repeat quiz during idle time? - python

I am working on a quiz bot in Python. Now I want to know how I can repeat the question after a certain idle time.
These are my global variables:
QUIZ_FILE = 'static/questions.txt'
QUIZ_TOTAL_LINES = 29
QUIZ_TIME_LIMIT = 40
QUIZ_IDLE_LIMIT = 3000000
QUIZ_RECURSIVE_MAX = 3000000
QUIZ_CURRENT_ANSWER = {}
QUIZ_CURRENT_HINT = {}
QUIZ_CURRENT_HINT_NEW = {}
QUIZ_CURRENT_TIME = {}
QUIZ_IDLENESS = {}
QUIZ_IDLE_ANSWER = {}
QUIZ_START = {}
QUIZ_IDLE_ANSWER_FIRSR = {}
QUIZ_NOWORD = '*'
MODE = 'M1'
PTS = 'P2'
ACC = 'A2'
Bot time settings:
import threading
import time    # used by quiz_timer() and quiz_ask_question() below
import random  # used for the thread name in quiz_ask_question()
HELP = u'help of command > "!quiz"'

def sectomin(time):
    m = 0
    s = 0
    if time >= 60:
        m = time / 60
        if (m * 60) != 0:
            s = time - (m * 60)
        else:
            s = 0
    else:
        m = 0
        s = time
    return str(m)+u'min. in '+str(s)+u'sec.'
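(As an aside, the same minutes/seconds split can be written with the built-in divmod; a sketch of an equivalent helper:)
def sectomin_alt(time):
    # divmod gives the minutes and the remaining seconds in one step
    m, s = divmod(time, 60)
    return str(m) + u'min. in ' + str(s) + u'sec.'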
def quiz_timer(groupchat, start_time):
    global QUIZ_TIME_LIMIT
    global QUIZ_CURRENT_TIME
    time.sleep(QUIZ_TIME_LIMIT)
    if QUIZ_CURRENT_TIME.has_key(groupchat) and QUIZ_CURRENT_ANSWER.has_key(groupchat) and start_time == QUIZ_CURRENT_TIME[groupchat]:
        msg(groupchat, u'(!) time out! ' + sectomin(QUIZ_TIME_LIMIT) + u' passed.\nCorrect answer: ' + QUIZ_CURRENT_ANSWER[groupchat])
        if QUIZ_IDLENESS.has_key(groupchat):
            QUIZ_IDLENESS[groupchat] += 1
        else:
            QUIZ_IDLENESS[groupchat] = 1
        if QUIZ_IDLENESS[groupchat] >= QUIZ_IDLE_LIMIT:
            msg(groupchat, u'(!) quiz will be automatically completed for inaction! ' + str(QUIZ_IDLE_LIMIT) + ' unanswered questions.')
            del QUIZ_CURRENT_ANSWER[groupchat]
            quiz_list_scores(groupchat)
        else:
            quiz_ask_question(groupchat)
The method that asks a question:
def quiz_ask_question(groupchat):
    global answer
    global QUIZ_CURRENT_TIME
    global question
    global QUIZ_IDLE_ANSWER
    global QUIZ_IDLE_ANSWER_FIRSR
    QUIZ_IDLE_ANSWER = {groupchat:{}}
    (question, answer) = quiz_new_question()
    QUIZ_CURRENT_ANSWER[groupchat] = answer
    QUIZ_CURRENT_HINT[groupchat] = None
    QUIZ_CURRENT_HINT_NEW[groupchat] = None
    QUIZ_CURRENT_TIME[groupchat] = time.time()
    threading.Thread(None, quiz_timer, 'gch'+str(random.randrange(0,9999)), (groupchat, QUIZ_CURRENT_TIME[groupchat])).start()
    msg(groupchat, u'(?) question: \n' + question)
I want the bot to automatically repeat the question every few seconds while the chat is idle. I cannot get it to work so that the bot re-asks the question automatically within the QUIZ_TIME_LIMIT = 40 window.
Code to resend the question on request:
def handler_quiz_resend(type, source, body):
    global question
    groupchat = source[1]
    if QUIZ_CURRENT_ANSWER.has_key(groupchat):
        res = u'(*) current question: \n'+question
        reply(type, source, res)
    else:
        reply(type, source, u'no quiz, '+HELP)
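One way to get the periodic reminder (a sketch built on the globals above; QUIZ_REMIND_INTERVAL is a new, illustrative constant): start a second thread next to quiz_timer that wakes up every few seconds and re-posts the question for as long as it is still the current, unanswered one.
QUIZ_REMIND_INTERVAL = 10  # seconds between reminders (new constant, tune as needed)

def quiz_reminder(groupchat, start_time):
    while True:
        time.sleep(QUIZ_REMIND_INTERVAL)
        # stop once the question was answered, timed out or replaced
        if groupchat not in QUIZ_CURRENT_ANSWER:
            break
        if QUIZ_CURRENT_TIME.get(groupchat) != start_time:
            break
        msg(groupchat, u'(*) reminder, current question: \n' + question)

# started from quiz_ask_question(), right after the quiz_timer thread:
# threading.Thread(None, quiz_reminder, 'rem' + str(random.randrange(0, 9999)),
#                  (groupchat, QUIZ_CURRENT_TIME[groupchat])).start()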

Related

Issue while using a timer function in a threading context

While working on a game for my final project in a class I'm taking, I was trying to figure out how to use a timer while still being able to interact with other running code. In my research I came across threading, and it has worked so far, but in practice the timer doesn't work: it rapidly repeats itself and I don't know how to slow it down to the rate I want. I've tried swapping the interval number for anything else, but no matter what it is, it keeps doing the same thing.
Also, I understand the code is messy; I'm a student and I'm still learning, and I'm sure there are much better ways to do a lot of the things I have done. This is also my first question on Stack Overflow, so I hope I'm doing this right.
The problematic part is these two lines (129 & 130 in my file):
timer = threading.Timer(1, clickers)
timer.start
import turtle as trtl
import threading
import time
wn = trtl.Screen()
wn.bgpic("clouds.png")
font_setup = ("Arial", 15, "normal")
font_setup2 = ("Arial", 20, "bold")
sun_img = "sun.gif"
wn.addshape("sun.gif")
sun = trtl.Turtle(shape = sun_img)
sun.penup()
sun.goto(0, 175)
clkr_img = "rocketship.gif"
wn.addshape("rocketship.gif")
clkr = trtl.Turtle(shape = clkr_img)
clkr.penup()
clkr.left(90)
clkr.goto(0,-110)
moneya = trtl.Turtle()
moneya.hideturtle()
moneya.penup()
moneya.goto(-180, -180)
money = 0
upg1price = trtl.Turtle()
upg1price.hideturtle()
upg1price.penup()
upg1price.goto(63,-20)
csup = trtl.Turtle()
csup.hideturtle()
csup.penup()
csup.goto(63,-50)
broke = trtl.Turtle()
broke.hideturtle()
shoptf = 0
broke.penup()
broke.goto(-170,-30)
broke.pencolor("red")
shop_img = "cart.gif"
wn.addshape("cart.gif")
shop = trtl.Turtle(shape = shop_img)
shop.shapesize(0.5)
shop.penup()
shop.goto(150,150)
back_img = "backk.gif"
wn.addshape("backk.gif")
back = trtl.Turtle(shape = back_img)
back.penup()
back.hideturtle()
back.goto(-110,135)
ugrd_img = "upgrade1.gif"
wn.addshape("upgrade1.gif")
ugrd = trtl.Turtle(shape = ugrd_img)
ugrd.penup()
ugrd.goto(0,-10)
ugrd.hideturtle()
cs_img = "clicksla.gif"
wn.addshape("clicksla.gif")
cs = trtl.Turtle(shape = cs_img)
cs.penup()
cs.goto(0, -40)
cs.hideturtle()
dmg = 5
wn.addshape("rktboom.gif")
clkrdmg = 0
price = 50
price2= 75
fmvmnt = 1
def gostore(x, y):
    global price
    upg1price.write("$" + str(round(price)), font=font_setup)
    csup.write("$" + str(round(price2)), font=font_setup)
    back.showturtle()
    clkr.hideturtle()
    shop.hideturtle()
    ugrd.showturtle()
    cs.showturtle()
    sun.hideturtle()
    wn.bgpic("storefront.png")
percentnumb2 = 50
percentnumb = 25
def upgrade1(x, y):
    global money, dmg, price, percent, fmvmnt
    if (money < price):
        broke.write("You cant afford!", font=font_setup2)
        time.sleep(3)
        broke.clear()
    else:
        money = money - round(price)
        moneya.clear()
        moneya.write("MONEY: $" + str(money), font=font_setup)
        dmg = dmg + 5
        fmvmnt = fmvmnt + 1
        upg1price.clear()
        percent = (percentnumb*price)/100
        price = round(price) + percent
        upg1price.write("$" + str(round(price)), font=font_setup)
clik = 0
mor = 0
timer = None
def clickers(x, y):
    global money, price2, percent2, clik, clkrdmg, timer, clkr_img
    if (money < price2):
        broke.write("You cant afford!", font=font_setup2)
        time.sleep(3)
        broke.clear()
    else:
        money = money - round(price2)
        moneya.clear()
        moneya.write("MONEY: $" + str(money), font=font_setup)
        clik = clik + 1
        clkrdmg = clkrdmg + 5
        csup.clear()
        percent2 = (percentnumb2*price2)/100
        price2 = round(price2) + percent2
        csup.write("$" + str(round(price2)), font=font_setup)
        while clik >= 1:
            money = money + clkrdmg
            moneya.clear()
            moneya.write("MONEY: $" + str(money), font=font_setup)
            clkr.forward(1)
            if (abs(clkr.ycor() - sun.ycor()) < 5):
                clkr.shape("rktboom.gif")
                time.sleep(1)
                clkr.hideturtle()
                clkr.shape("rocketship.gif")
                clkr.goto(0, -100)
                clkr.showturtle()
            timer = threading.Timer(1, clickers)
            timer.start
def gainpoint(x, y):
    global money, dmg, fmvmnt
    money = money + dmg
    moneya.clear()
    moneya.write("MONEY: $" + str(money), font=font_setup)
    if (abs(clkr.ycor() - sun.ycor()) < 5):
        clkr.goto(0, -100)
    clkr.forward(fmvmnt)
def backclick(x, y):
    wn.bgpic("clouds.png")
    upg1price.clear()
    csup.clear()
    broke.clear()
    back.hideturtle()
    clkr.showturtle()
    shop.showturtle()
    ugrd.hideturtle()
    sun.showturtle()
    moneya.clear()
    cs.hideturtle()
clkr.onclick(gainpoint)
cs.onclick(clickers)
shop.onclick(gostore)
back.onclick(backclick)
ugrd.onclick(upgrade1)
wn.listen()
wn.mainloop()
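Two things stand out, offered as observations rather than a definitive fix: timer.start is missing its parentheses, so the threading.Timer is never actually started, and clickers(x, y) expects click coordinates that a Timer callback would not supply; the rapid repetition most likely comes from the while clik >= 1: loop, which never ends. A sketch of an alternative using turtle's own wn.ontimer(), which re-runs a small tick function on the main event loop once per second (clicker_tick is a hypothetical helper, not part of the original code):
def clicker_tick():
    global money
    if clik >= 1:
        # passive income: earn once per tick for the clickers you own
        money = money + clkrdmg
        moneya.clear()
        moneya.write("MONEY: $" + str(money), font=font_setup)
        clkr.forward(1)
    wn.ontimer(clicker_tick, 1000)  # re-arm: run again in 1000 ms

# call clicker_tick() once, before wn.mainloop(), to start the repeating tick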

Confidence score of answer extracted using ELMo BiDAF model and AllenNLP

I'm working on a deep learning project where I use a bidirectional attention flow model (an AllenNLP pretrained model) to build a question answering system. It uses the SQuAD dataset, and the BiDAF model extracts the answer span from a paragraph. Is there any way to determine the confidence score (accuracy) or any other metric for the answer extracted by the model?
I have used the evaluate subcommand from the allennlp package, but it only reports the model's score after testing. I was hoping there is an easier way to solve this with another such command.
Attaching the code and the terminal output below.
from rake_nltk import Rake
from string import punctuation
from nltk.corpus import stopwords
from allennlp.predictors.predictor import Predictor
import spacy
import wikipedia
import re
import requests
from requests_html import HTMLSession
from bs4 import BeautifulSoup
import traceback
from nltk.stem import SnowballStemmer
from nltk.util import ngrams
from math import log10
from flask import Flask, request, jsonify, render_template
from gevent.pywsgi import WSGIServer
import time
import multiprocessing as mp
from gtts import gTTS
import os
NLP = spacy.load('en_core_web_md')
stop = stopwords.words('english')
symbol = r"""!#$%^&*();:\n\t\\\"!\{\}\[\]<>-\?"""
stemmer = SnowballStemmer('english')
wikipedia.set_rate_limiting(True)
session = HTMLSession()
results = 5
try:
    predictor = Predictor.from_path("bidaf-model-2017.09.15-charpad.tar.gz")
except:
    predictor = Predictor.from_path("https://storage.googleapis.com/allennlp-public-models/bidaf-elmo-model-2018.11.30-charpad.tar.gz")
try:
    srl = Predictor.from_path('srl-model-2018.05.25.tar.gz')
except:
    srl = Predictor.from_path('https://s3-us-west-2.amazonaws.com/allennlp/models/bert-base-srl-2019.06.17.tar.gz')
key = Rake(min_length=1, stopwords=stop, punctuations=punctuation, max_length=6)
wh_words = "who|what|how|where|when|why|which|whom|whose|explain".split('|')
stop.extend(wh_words)
session = HTMLSession()
output = mp.Queue()
def termFrequency(term, doc):
    normalizeTermFreq = re.sub('[\[\]\{\}\(\)]', '', doc.lower()).split()
    normalizeTermFreq = [stemmer.stem(i) for i in normalizeTermFreq]
    dl = len(normalizeTermFreq)
    normalizeTermFreq = ' '.join(normalizeTermFreq)
    term_in_document = normalizeTermFreq.count(term)
    #len_of_document = len(normalizeTermFreq )
    #normalized_tf = term_in_document / len_of_document
    normalized_tf = term_in_document
    return normalized_tf, normalizeTermFreq, dl#, n_unique_term
def inverseDocumentFrequency(term, allDocs):
    num_docs_with_given_term = 0
    for doc in allDocs:
        if term in doc:
            num_docs_with_given_term += 1
    if num_docs_with_given_term > 0:
        total_num_docs = len(allDocs)
        idf_val = log10(((total_num_docs+1) / num_docs_with_given_term))
        term_split = term.split()
        if len(term_split) == 3:
            if len([term_split[i] for i in [0, 2] if term_split[i] not in stop]) == 2:
                return idf_val*1.5
            return idf_val
        return idf_val
    else:
        return 0
def sent_formation(question, answer):
    tags_doc = NLP(question)
    tags_doc_cased = NLP(question.title())
    tags_dict_cased = {i.lower_:i.pos_ for i in tags_doc_cased}
    tags_dict = {i.lower_:i.pos_ for i in tags_doc}
    question_cased = []
    for i in question[:-1].split():
        if tags_dict[i] == 'PROPN' or tags_dict[i] == 'NOUN':
            question_cased.append(i.title())
        else:
            question_cased.append(i.lower())
    question_cased.append('?')
    question_cased = ' '.join(question_cased)
    #del tags_dict,tags_doc, tags_doc_cased
    pre = srl.predict(question_cased)
    verbs = []
    arg1 = []
    for i in pre['verbs']:
        verbs.append(i['verb'])
        if 'B-ARG1' in i['tags']:
            arg1.append((i['tags'].index('B-ARG1'), i['tags'].count('I-ARG1'))\
                if not pre['words'][i['tags'].index('B-ARG1')].lower() in wh_words else \
                (i['tags'].index('B-ARG2'), i['tags'].count('I-ARG2')))
    arg1 = arg1[0] if arg1 else []
    if not arg1:
        verb_idx = pre['verbs'][0]['tags'].index('B-V')
        verb = pre['words'][verb_idx] if pre['words'][verb_idx] != answer.split()[0].lower() else ''
        subj_uncased = pre['words'][verb_idx+1:] if pre['words'][-1] not in symbol else \
            pre['words'][verb_idx+1:-1]
    else:
        verb = ' '.join(verbs)
        subj_uncased = pre['words'][arg1[0]:arg1[0]+arg1[1]+1]
    conj = ''
    if question.split()[0].lower() == 'when':
        conj = ' on' if len(answer.split()) > 1 else ' in'
    subj = []
    for n, i in enumerate(subj_uncased):
        if tags_dict_cased[i.lower()] == 'PROPN' and tags_dict[i.lower()] != 'VERB' or n == 0:
            subj.append(i.title())
        else:
            subj.append(i.lower())
    subj[0] = subj[0].title()
    print(subj)
    print(pre)
    subj = ' '.join(subj)
    sent = "{} {}{} {}.".format(subj, verb, conj, answer if answer[-1] != '.' else answer[:-1])
    return sent
class extractAnswer:

    def __init__(self):
        self.wiki_error = (wikipedia.exceptions.DisambiguationError,
                           wikipedia.exceptions.HTTPTimeoutError,
                           wikipedia.exceptions.WikipediaException)
        self.article_title = None
        # symbol = """!#$%^&*();:\n\t\\\"!\{\}\[\]<>-\?"""
    def extractAnswer_model(self, passage, question, s=0.4, e=0.3, wiki=False):
        if type(passage) == list:
            passage = " ".join(passage)
        if not question[-1] == '?':
            question = question+'?'
        pre = predictor.predict(passage=passage, question=question)
        if wiki:
            if max(pre['span_end_probs']) > 0.5:
                s = 0.12
            elif max(pre['span_end_probs']) > 0.4:
                s = 0.13
            elif max(pre['span_end_probs']) > 0.35:
                s = 0.14
            if max(pre['span_start_probs']) > 0.5:
                e = 0.12
            elif max(pre['span_start_probs']) > 0.4:
                e = 0.14
            elif max(pre['span_start_probs']) > 0.3:
                e = 0.15
        if max(pre['span_start_probs']) > s and max(pre['span_end_probs']) > e:
            key.extract_keywords_from_text(question)
            ques_key = [stemmer.stem(i) for i in ' '.join(key.get_ranked_phrases())]
            key.extract_keywords_from_text(passage)
            pass_key = [stemmer.stem(i) for i in ' '.join(key.get_ranked_phrases())]
            l = len(ques_key)
            c = 0
            for i in ques_key:
                if i in pass_key:
                    c += 1
            if c >= l/2:
                print(max(pre['span_start_probs']),
                      max(pre['span_end_probs']))
                if wiki:
                    return pre['best_span_str'], max(pre['span_start_probs']) + max(pre['span_end_probs'])
                try:
                    ans = sent_formation(question, pre['best_span_str'])
                except:
                    ans = pre['best_span_str']
                    print(traceback.format_exc())
                return ans
            print(ques_key, c, l)
            print(max(pre['span_start_probs']), max(pre['span_end_probs']))
            return 0, 0
        else:
            print(max(pre['span_start_probs']), max(pre['span_end_probs']), pre['best_span_str'])
            return 0, 0
    def wiki_search_api(self, query):
        article_list = []
        try:
            article_list.extend(wikipedia.search(query, results=results))
            print(article_list)
            return article_list
        except self.wiki_error:
            params = {'search': query, 'profile': 'engine_autoselect',
                      'format': 'json', 'limit': results}
            article_list.extend(requests.get('https://en.wikipedia.org/w/api.php?action=opensearch',
                                             params=params).json()[1])
            return article_list
        except:
            print('Wikipedia search error!')
            print(traceback.format_exc())
            return 0
    def wiki_passage_api(self, article_title, article_list, output):
        # Disambiguation_title = {}
        try:
            passage = wikipedia.summary(article_title)
            output.put((article_title, self.passage_pre(passage)))
        except wikipedia.exceptions.DisambiguationError as e:
            print(e.options[0], e.options)
            Disambiguation_pass = {}
            for p in range(2 if len(e.options) > 1 else len(e.options)):
                params = {'search':e.options[p], 'profile':'engine_autoselect', 'format':'json'}
                article_url = requests.get('https://en.wikipedia.org/w/api.php?action=opensearch',
                                           params=params).json()
                if not article_url[3]:
                    continue
                article_url = article_url[3][0]
                r = session.get(article_url)
                soup = BeautifulSoup(r.html.raw_html)
                print(soup.title.string)
                article_title_dis = soup.title.string.rsplit('-')[0].strip()
                if article_title_dis in article_list:
                    print('continue')
                    continue
                try:
                    url = "https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&exintro&explaintext&redirects=1&titles={}".format(article_title_dis)
                    passage = requests.get(url).json()['query']['pages']
                    for i in passage.keys():
                        if 'extract' in passage[i]:
                            Disambiguation_pass[article_title_dis] = self.passage_pre(passage[i]['extract'])
                except wikipedia.exceptions.HTTPTimeoutError:
                    passage = wikipedia.summary(article_title_dis)
                    Disambiguation_pass[article_title_dis] = self.passage_pre(passage)
                except:
                    Disambiguation_pass[article_title_dis] = ''
                    continue
            output.put((article_title, Disambiguation_pass))
        except:
            output.put((article_title, ''))
            print(traceback.format_exc())
    def sorting(self, article, question, topic):
        processes = [mp.Process(target=self.wiki_passage_api, args=(article[x], article, output))\
                     for x in range(len(article))]
        for p in processes:
            p.start()
        for p in processes:
            p.join(timeout=3)
        results_p = [output.get() for p in processes]
        article_list = []
        passage_list = []
        for i, j in results_p:
            if type(j) != dict and j:
                article_list.append(i)
                passage_list.append(j)
            elif type(j) == dict and j:
                for k, l in j.items():
                    if l:
                        article_list.append(k)
                        passage_list.append(l)
        normalize_passage_list = []
        start = time.time()
        keywords = " ".join(self.noun+self.ques_key+[topic.lower()])
        keywords = re.sub('[{0}]'.format(symbol), ' ', keywords).split()
        question = question+' '+topic
        ques_tokens = [stemmer.stem(i.lower()) for i in question.split() \
                       if i.lower() not in wh_words]
        print(ques_tokens)
        keywords_bigram = [' '.join(i) for i in list(ngrams(ques_tokens, 2)) \
                           if i[0] not in stop and i[1] not in stop]
        if len(ques_tokens) > 3:
            keywords_trigram = [' '.join(i) for i in list(ngrams(ques_tokens, 3)) \
                                if (i[0] in stop) + (i[2] in stop) + (i[1] in stop) < 3]
        else:
            keywords_trigram = []
        if len(ques_tokens) > 5:
            keywords_4gram = [' '.join(i) for i in list(ngrams(ques_tokens, 4)) \
                              if (i[0] in stop) + (i[2] in stop) +(i[1] in stop)+(i[3] in stop) < 4]
        else:
            keywords_4gram = []
        keywords_unigram = list(set([stemmer.stem(i.lower()) for i in keywords \
                                     if i.lower() not in stop]))
        keywords = keywords_unigram+list(set(keywords_bigram))+keywords_trigram+keywords_4gram
        tf = []
        if not passage_list:
            return 0
        pass_len = []
        #n_u_t=[]
        #key_dict = {i: keywords.count(i) for i in keywords}
        print('Extraction complete')
        #remove_pass={}
        #for n,i in enumerate(passage_list):
        #if len(i)<200 or not i:
        #remove_pass[article_list[n]]=i
        #print(n, article_list[n])
        #passage_list=[i for i in passage_list if i not in remove_pass.values()]
        #article_list=[i for i in article_list if i not in remove_pass.keys()]
        passage_list_copy = passage_list.copy()
        article_list_copy = article_list.copy()
        for i in range(len(passage_list_copy)):
            if passage_list.count(passage_list_copy[i]) > 1:
                passage_list.remove(passage_list_copy[i])
                article_list.remove(article_list_copy[i])
                print('Copy:', article_list_copy[i])
        del passage_list_copy
        del article_list_copy
        for n, i in enumerate(passage_list):
            temp_tf = {}
            c = 0
            for j in keywords:
                temp_tf[j], temp_pass, temp_len = termFrequency(j, i + ' ' + article_list[n])
                if temp_tf[j]:
                    c += 1
            normalize_passage_list.append(temp_pass)
            pass_len.append(temp_len)
            temp_tf['key_match'] = c
            tf.append(temp_tf)
        print(pass_len)
        print(keywords)
        idf = {}
        for i in keywords:
            idf[i] = inverseDocumentFrequency(i, normalize_passage_list)
        #print(tf, idf)
        tfidf = []
        #b=0.333 #for PLN
        b, k = 0.75, 1.2 #for BM25
        avg_pass_len = sum(pass_len)/len(pass_len)
        #pivot=sum(n_u_t)/len(n_u_t)
        for n, i in enumerate(tf):
            tf_idf = 0
            #avg_tf=sum(i.values())/len(i)
            key_match_ratio = i['key_match']/len(keywords)
            for j in keywords:
                #tf_idf+=idf[j]*((log(1+log(1+i[j])))/(1-b+(b*pass_len[n]/avg_pass_len))) #PLN
                tf_idf += idf[j]*(((k+1)*i[j])/(i[j]+k*(1-b+(b*pass_len[n]/avg_pass_len)))) #BM25
            tfidf.append(tf_idf*key_match_ratio)
        tfidf = [i/sum(tfidf)*100 for i in tfidf if any(tfidf)]
        if not tfidf:
            return 0, 0, 0, 0, 0
        print(tfidf)
        print(article_list, len(passage_list))
        if len(passage_list) > 1:
            sorted_tfidf = sorted(tfidf, reverse=1)
            idx1 = tfidf.index(sorted_tfidf[0])
            passage1 = passage_list[idx1]
            #article_title=
            tfidf1 = sorted_tfidf[0]
            idx2 = tfidf.index(sorted_tfidf[1])
            passage2 = passage_list[idx2]
            article_title = (article_list[idx1], article_list[idx2])
            tfidf2 = sorted_tfidf[1]
        else:
            article_title = 0
            tfidf2 = 0
            if passage_list:
                passage1 = passage_list[0]
                tfidf1 = tfidf[0]
                passage2 = 0
            else:
                passage1 = 0
                passage2 = 0
                tfidf1, tfidf2 = 0, 0
        end = time.time()
        print('TFIDF time:', end-start)
        return passage1, passage2, article_title, tfidf1, tfidf2
    def passage_pre(self, passage):
        #passage=re.findall("[\da-zA-z\.\,\'\-\/\–\(\)]*", passage)
        passage = re.sub('\n', ' ', passage)
        passage = re.sub('\[[^\]]+\]', '', passage)
        passage = re.sub('pronunciation', '', passage)
        passage = re.sub('\\\\.+\\\\', '', passage)
        passage = re.sub('{.+}', '', passage)
        passage = re.sub(' +', ' ', passage)
        return passage
    def wiki(self, question, topic=''):
        if not question:
            return 0
        question = re.sub(' +', ' ', question)
        question = question.title()
        key.extract_keywords_from_text(question)
        self.ques_key = key.get_ranked_phrases()
        doc = NLP(question)
        self.noun = [str(i).lower() for i in doc.noun_chunks if str(i).lower() not in wh_words]
        print(self.ques_key, self.noun)
        question = re.sub('[{0}]'.format(symbol), ' ', question)
        if not self.noun + self.ques_key:
            return 0
        article_list = None
        question = question.lower()
        if self.noun:
            if len(self.noun) == 2 and len(" ".join(self.noun).split()) < 6:
                #question1=question
                self.noun = " ".join(self.noun).split()
                if self.noun[0] in stop:
                    self.noun.pop(0)
                self.noun = question[question.index(self.noun[0]):question.index(self.noun[-1]) \
                                     +len(self.noun[-1])+1].split()
                #del question1
                print(self.noun)
            article_list = self.wiki_search_api(' '.join(self.noun))
        if self.ques_key and not article_list:
            article_list = self.wiki_search_api(self.ques_key[0])
            if not article_list:
                article_list = self.wiki_search_api(' '.join(self.ques_key))
        if not article_list:
            print('Article not found on wikipedia.')
            return 0, 0
        article_list = list(set(article_list))
        passage1, passage2, article_title, tfidf1, tfidf2 = self.sorting(article_list,
                                                                         question, topic)
        if passage1:
            ans1, conf1 = self.extractAnswer_model(passage1, question, s=0.20, e=0.20, wiki=True)
        else:
            ans1, conf1 = 0, 0
        if ans1:
            conf2 = 0
            if len(ans1) > 600:
                print(ans1)
                print('Repeat')
                ans1, conf1 = self.extractAnswer_model(ans1, question, s=0.20, e=0.20, wiki=True)
        threshhold = 0.3 if not ((tfidf1- tfidf2) <= 10) else 0.2
        if round(tfidf1- tfidf2) < 5:
            threshhold = 0
        if (tfidf1- tfidf2) > 20:
            threshhold = 0.35
        if (tfidf1- tfidf2) > 50:
            threshhold = 1
        if (passage2 and conf1 < 1.5) or (tfidf1 - tfidf2) < 10:
            ans2, conf2 = self.extractAnswer_model(passage2, question, s=0.20, e=0.20,
                                                   wiki=True) if passage2 else (0, 0)
        title = 0
        if round(conf1, 2) > round(conf2, 2) - threshhold:
            print('ans1')
            ans = ans1
            title = article_title[0] if article_title else 0
        else:
            print('ans2')
            title = article_title[1] if article_title else 0
            ans = ans2
        if not question[-1] == '?':
            question = question+'?'
        try:
            ans = sent_formation(question, ans)
        except:
            print(traceback.format_exc())
        print(ans, '\n', '\n', article_title)
        return ans, title
extractor = extractAnswer()
app = Flask(__name__)
#app.route("/", methods=["POST", "get"])
#app.route("/ans")
def ans():
start = time.time()
question = request.args.get('question')
topic = request.args.get('topic')
passage = request.args.get('passage')
if not question:
return render_template('p.html')
if not topic:
topic = ''
if passage:
answer = extractor.extractAnswer_model(passage, question)
else:
answer, title = extractor.wiki(question, topic)
end = time.time()
if answer:
mytext = str(answer)
language = 'en'
myobj = gTTS(text=mytext, lang=language, slow=False)
myobj.save("welcome.mp3")
# prevName = 'welcome.mp3'
#newName = 'static/welcome.mp3'
#os.rename(prevName,newName)
return render_template('pro.html', answer=answer)
else:
return jsonify(Status='E', Answer=answer, Time=end-start)
#app.route("/audio_del/", methods=["POST", "get"])
def audio_del():
return render_template('p.html');
#app.route("/audio_play/", methods=["POST", "get"])
def audio_play():
os.system("mpg321 welcome.mp3")
return render_template('white.html')
if __name__ == "__main__":
    PORT = 7091
    HTTP_SERVER = WSGIServer(('0.0.0.0', PORT), app)
    print('Running on', PORT, '...')
    HTTP_SERVER.serve_forever()
![Output in the terminal for a question I've asked](https://i.stack.imgur.com/6pyv5.jpg)
I came across a possible solution after looking closely at the output returned by the model. Although it is probably not something you can rely on with full accuracy, it did the job in my case:
Note that the text answer, "best_span_str", is always a sub-span of the passage. It spans the range stored in "best_span", i.e. "best_span" contains the start and end index of the answer.
Now, the output data also contains a property named "span_end_probs". "span_end_probs" contains a list of values that correspond to all the words present in the text input.
If you look closely across various inputs, the maximum value always falls at one of the indexes within the start/end range that "best_span" contains. This value seemed very similar to the confidence level we need. Let's call this value score. All you need to do now is try some inputs and find a suitable way to use this score as a metric.
E.g.: if you need a threshold value for some application, you can run a number of test inputs and find the value that is most accurate. In my case this was around 0.35, i.e. if score is less than 0.35 it prints "Answer not found", and if it is greater than or equal to 0.35 it prints the string in "best_span_str".
Here's my code snippet:
from allennlp.predictors.predictor import Predictor

passage = '--INPUT PASSAGE--'
question = '--INPUT QUESTION--'

predictor = Predictor.from_path("https://storage.googleapis.com/allennlp-public-models/bidaf-elmo.2021-02-11.tar.gz")
output = predictor.predict(
    passage=passage,
    question=question
)
score = max(output["span_end_probs"])
if score < 0.35:
    print('Answer not found')
else:
    print(output["best_span_str"])
You can readily see the example input and output here.
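As a follow-up note, the question's own extractAnswer_model() adds max(span_start_probs) and max(span_end_probs) together when wiki=True. Continuing from the snippet above, a hedged variant that gates on that combined value instead (the 0.7 threshold is only an illustrative starting point to tune on your own test inputs):
start_score = max(output["span_start_probs"])
end_score = max(output["span_end_probs"])
combined = start_score + end_score  # roughly in the range 0..2

if combined < 0.7:  # illustrative threshold, tune it on your own data
    print('Answer not found')
else:
    print(output["best_span_str"], '(combined span score: %.2f)' % combined)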

Chrome T-Rex-Game Reinforcement learning showing no improvement

I would like to create an AI for the Chrome no-internet dino game, so I adapted this GitHub repository to fit my needs. I used the following formula to calculate the new Q:
Source: https://en.wikipedia.org/wiki/Q-learning
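(The formula image from the original post is not reproduced here. For reference, the standard Q-learning update it points to, which the Q-update lines in the code below follow, is
Q(s_t, a_t) \leftarrow (1 - \alpha)\, Q(s_t, a_t) + \alpha \left( r_t + \gamma \max_a Q(s_{t+1}, a) \right)
with learning rate alpha = 0.2 and discount gamma = 0.9 as set below; on game over, the terminal reward rewardKill is used with no future term.)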
My problem now is that even after roughly 2,000,000 iterations my game score is not increasing.
You can find the game file here: https://pastebin.com/XrwQ0suJ
QLearning.py:
import pickle
import Game_headless
import Game
import numpy as np
from collections import defaultdict
rewardAlive = 1
rewardKill = -10000
alpha = 0.2 # Learningrate
gamma = 0.9 # Discount
Q = defaultdict(lambda: [0, 0, 0]) # 0 = Jump / 1 = Duck / 2 = Do Nothing
oldState = None
oldAction = None
gameCounter = 0
gameScores = []
def paramsToState(params):
    cactus1X = round(params["cactus1X"] / 10) * 10
    cactus2X = round(params["cactus2X"] / 10) * 10
    cactus1Height = params["cactus1Height"]
    cactus2Height = params["cactus2Height"]
    pteraX = round(params["pteraX"] / 10) * 10
    pteraY = params["pteraY"]
    playerY = round(params["playerY"] / 10) * 10
    gamespeed = params["gamespeed"]
    return str(cactus1X) + "_" + str(cactus2X) + "_" + str(cactus1Height) + "_" + \
           str(cactus2Height) + "_" + str(pteraX) + "_" + str(pteraY) + "_" + \
           str(playerY) + "_" + str(gamespeed)
def shouldEmulateKeyPress(params): # 0 = Jump / 1 = Duck / 2 = Do Nothing
    global oldState
    global oldAction
    state = paramsToState(params)
    oldState = state
    estReward = Q[state]
    action = estReward.index(max(estReward))
    if oldAction is None:
        oldAction = action
        return action
    # Previous action was successful
    # -> Update Q
    prevReward = Q[oldState]
    prevReward[oldAction] = (1 - alpha) * prevReward[oldAction] + \
                            alpha * (rewardAlive + gamma * max(estReward))
    Q[oldState] = prevReward
    oldAction = action
    return action
def onGameOver(score):
    # Previous action was NOT successful
    # -> Update Q
    global oldState
    global oldAction
    global gameCounter
    global gameScores
    gameScores.append(score)
    if gameCounter % 10000 == 0:
        print(f"{gameCounter} : {np.mean(gameScores[-100:])}")
    prevReward = Q[oldState]
    prevReward[oldAction] = (1 - alpha) * prevReward[oldAction] + \
                            alpha * rewardKill
    Q[oldState] = prevReward
    oldState = None
    oldAction = None
    if gameCounter % 10000 == 0:
        with open("Q\\" + str(gameCounter) + ".pickle", "wb") as file:
            pickle.dump(dict(Q), file)
    gameCounter += 1
Game_headless.main(shouldEmulateKeyPress, onGameOver)
On every frame, the gameplay() function from Game_headless.py calls shouldEmulateKeyPress(). That function then returns 0 for Jump, 1 for Duck and 2 for Do Nothing.
I tried adjusting the constants, but that didn't have any effect.
If you have any questions, please don't hesitate to ask me!
Thank you in advance!
Someone on Reddit did this, did you take a look at their code? https://www.reddit.com/r/MachineLearning/comments/8iujuu/p_tfrex_ai_learns_to_play_google_chromes_dinosaur/
I was able to fix the problem, but I don't really know what the mistake was. I added a return statement at the end of the gameplay function, and somehow it works now.

PyQT table update crash easyly

I'm using PyQt4 for the first time. I created a QTableWidget to show running messages. When my program runs, it crashes within ten minutes. If I disable my table-update function, it doesn't crash anymore.
Here is my code, please help me.
class table_work(QThread):
    TableDataSignal = pyqtSignal()

    def __init__(self, main_self):
        # QThread.__init__(self)
        super(table_work, self).__init__(main_self)
        self.main_self = main_self
        self.table_update_list = list()

    #pyqtSlot(dict)
    def update_table_thread_o(self, work):
        try:
            row_pos = work['row_position']
            data = work['data']
            table_key_sort = work['key_sort']
            this_table = work['table']
            k = 0
            for table_key in table_key_sort:
                this_table.setItem(row_pos, k, QTableWidgetItem(unicode(data[table_key])))
                k += 1
            del work
        except:
            pass

    def update_table_thread(self):
        main_self = self.main_self
        table_work_list = self.table_update_list
        while 1:
            for work in self.table_update_list:
                row_pos = work['row_position']
                data = work['data']
                table_key_sort = work['key_sort']
                this_table = work['table']
                k = 0
                for table_key in table_key_sort:
                    this_table.setItem(row_pos, k, QTableWidgetItem(unicode(data[table_key])))
                    k += 1
            time.sleep(0.5)

    def run(self):
        self.update_table_thread()
This is the code that updates the table message:
def update_table(self, address, change_obj=None, tabe_name='auto_card'):
    sample_dict = dict()
    table_key_sort = list()
    now_table_sort = 0
    if tabe_name == "auto_bot":
        this_table = self.auto_bot_procc_table
        table_data_list = self.auto_bot_procc_table_list
        now_table_sort = self.auto_bot_now_table_sort
        sample_dict['address'] = address
        sample_dict['money'] = 0
        sample_dict['run_time'] = 0
        sample_dict['item_cd'] = u"60分鐘後"  # "in 60 minutes"
        sample_dict['stat'] = "Ready..."
        sample_dict['sort'] = now_table_sort
        table_key_sort.append('address')
        table_key_sort.append('money')
        table_key_sort.append('run_time')
        table_key_sort.append('item_cd')
        table_key_sort.append('stat')
    if tabe_name == "auto_card":
        this_table = self.process_table
        table_data_list = self.now_procc_table_list
        now_table_sort = self.now_table_sort
        sample_dict['address'] = address
        sample_dict['done_num'] = 0
        sample_dict['pre_item'] = ""
        sample_dict['procc'] = "Ready"
        sample_dict['mission_procc'] = u"待命.."  # "standing by.."
        sample_dict['mission_num'] = 0
        sample_dict['mission_line'] = 0
        sample_dict['update_time'] = db.get_time()
        sample_dict['sort'] = now_table_sort
        sample_dict['option'] = ""
        table_key_sort.append('address')
        table_key_sort.append('done_num')
        table_key_sort.append('pre_item')
        table_key_sort.append('mission_procc')
        table_key_sort.append('procc')
        table_key_sort.append('mission_num')
        table_key_sort.append('mission_line')
        table_key_sort.append('update_time')
    if address not in table_data_list:
        this_table.insertRow(sample_dict['sort'])
        table_data_list[address] = sample_dict
        sample_dict['sort'] = self.auto_bot_now_table_sort
        self.auto_bot_now_table_sort += 1
    acc_data = table_data_list[address]
    if change_obj != None:
        key = change_obj['key']
        val = change_obj['val']
        if key in acc_data:
            acc_data[key] = val
            acc_data['update_time'] = db.get_time()
    rowPosition = acc_data['sort']
    temp = dict()
    temp['row_position'] = rowPosition
    temp['data'] = acc_data
    temp['key_sort'] = table_key_sort
    temp['table'] = this_table
    self.TableDataSignal.emit(temp)
    del temp
Some time later I got an answer. I'm a PyQt newbie, but after experience with various projects I understand this: if you are not on the main thread, never change the UI directly. Always use a signal and emit, even if your code seems to work without it; otherwise there will be a series of disasters. Something like:
class sample(QtCore.QThread):
    table_data_change = QtCore.pyqtSignal(dict)

    def __init__(self, main_win):
        super(sample, self).__init__(main_win)  # initialise the QThread base class
        self.main = main_win
        self.table_data_change.connect(self.main.change_fn)

    def test(self):
        data = dict()
        data['btn'] = .....
        data['val'] = .....
        self.table_data_change.emit(data)
Save your time!
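For reference, a minimal self-contained sketch of that pattern in PyQt4 (the Worker class, the widget layout and the update_cell slot are all hypothetical): the worker thread only emits a dict describing the change, and the slot that actually calls setItem runs in the main thread because the cross-thread signal/slot connection delivers it there.
import sys
import time
from PyQt4 import QtCore, QtGui  # PyQt4 to match the question

class Worker(QtCore.QThread):
    row_ready = QtCore.pyqtSignal(dict)   # payload carried to the GUI thread

    def run(self):
        for i in range(100):
            # never touch widgets here; just describe the change and emit it
            self.row_ready.emit({'row': i % 5, 'col': 0, 'text': 'tick %d' % i})
            time.sleep(0.5)

class Window(QtGui.QWidget):
    def __init__(self):
        super(Window, self).__init__()
        self.table = QtGui.QTableWidget(5, 1, self)
        layout = QtGui.QVBoxLayout(self)
        layout.addWidget(self.table)
        self.worker = Worker(self)
        # queued connection: update_cell runs in the GUI thread
        self.worker.row_ready.connect(self.update_cell)
        self.worker.start()

    def update_cell(self, work):
        item = QtGui.QTableWidgetItem(work['text'])
        self.table.setItem(work['row'], work['col'], item)

if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    w = Window()
    w.show()
    sys.exit(app.exec_())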

Python - WindowName for Compare with List

I am currently stuck on one point of a Python program.
I want to compare the event's WindowName against a list in order to launch directives.
Example:
import win32api
import pyHook

liste = ["Google", "Task"]
if event.WindowName == liste:
    Screenshot()
    return True
else:
    return False
Complete code, which works:
def OnMouseEvent(event):
    global interval
    data = '\n[' + str(time.ctime().split(' ')[3]) + ']' \
           + ' WindowName : ' + str(event.WindowName)
    data += '\n\tButton:' + str(event.MessageName)
    data += '\n\tClicked in (Position):' + str(event.Position)
    data += '\n===================='
    global t, start_time, pics_names
    """
    Code Edit
    """
    t = t + data
    if len(t) > 300:
        ScreenShot()
    """
    Finish
    """
    if len(t) > 500:
        f = open('Logfile.txt', 'a')
        f.write(t)
        f.close()
        t = ''
    if int(time.time() - start_time) == int(interval):
        Mail_it(t, pics_names)
        start_time = time.time()
        t = ''
        return True
    else:
        return False
When I edit the code between the """ markers, it doesn't work:
t = t + data
liste = ["Google", "Task"]
if event.WindowName == liste:
    ScreenShot()
It returns:
File "C:\Python26\lib\site-packages\pyHook\HookManager.py", line 324, in MouseSwitch func = self.mouse_funcs.get(msg) TypeError: an integer is required
I tried changing this in HookManager:
func = self.keyboard_funcs.get(msg) to: func = self.keyboard_funcs.get( int(str(msg)) )
But it doesn't work. I think I have described the whole problem.
Thanks for your help in advance :)
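A sketch of the comparison itself, in case it helps: comparing a string to a whole list with == is always False, so the usual approach is to check whether any keyword from the list occurs in the window title. The handler should also always return an integer/boolean so pyHook can pass the event on. (ScreenShot() and the list contents come from the question; everything else here is illustrative.)
liste = ["Google", "Task"]

def OnMouseEvent(event):
    window_name = event.WindowName or ''
    # trigger when any keyword appears in the window title
    if any(keyword in window_name for keyword in liste):
        ScreenShot()
    return True  # keep passing the event to other handlers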
