I'm running Python 3 and I'm getting the following error:
AttributeError: 'AssemblyParser' object has no attribute 'hasMoreCommands'
Here is the code that is raising the error:
import sys
from Parser import AssemblyParser
from Code import Code

parser = AssemblyParser(sys.argv[1])
translator = Code()

out_file = str(sys.argv[1]).split(".")
out_file = str(out_file[:1]) + ".hack"

with open(out_file, 'w', encoding='utf-8') as f:
    while parser.hasMoreCommands():
        parser.advance()
        if parser.commandType() == "A_COMMAND":
            dec_num = parser.symbol()
            binary = "{0:b}".format(dec_num)
        elif parser.commandType() == "C_COMMAND":
            default_bits = "111"
            comp_bits += translator.comp(parser.comp())
            dest_bits += translator.dest(parser.dest())
            jump_bits += translator.jump(parser.jump())
            binary = default_bits + comp_bits + dest_bits + jump_bits
        assert len(binary) == 16
        f.write(binary)
Here is my Parser.py file:
class AssemblyParser:
    """
    Encapsulates access to the input code. Reads an assembly language command,
    parses it, and provides convenient access to the command's components (fields and symbols).
    In addition, removes all whitespace and comments.
    """

    def __init__(self, input_file):
        self.current_command = ""
        self.next_command = ""
        with open(input_file, "r+", encoding='utf-8') as f:
            for l in f:
                line = "".join(l.split())  # Remove whitespace from the line
                line = line.split('//')    # Removes any comments from the line
                clean_line = line[0]
                if clean_line.strip():     # Removes any blank lines
                    f.write(clean_line)
            next_command = f.readline()

    def __hasMoreCommands__(self):
        if self.next_command:
            return true
        return false

    def __advance__(self):
        with open(input_file, encoding='utf-8') as f:
            self.current_command = self.next_command
            self.next_command = f.readline()

    def __commandType__(self):
        char_1 = self.current_command[:1]
        if char_1 == "#":
            return "A_COMMAND"
        elif char_1 == "(":
            return "L_COMMAND"
        else:
            return "C_COMMAND"

    def __symbol__(self):
        assert self.commandType() == ("A_COMMAND" or "L_COMMAND")
        if self.commandType() == "A_COMMAND":
            symbol = str(symbol[1:])
        else:
            symbol = str(symbol[1:len(symbol)-1])
        return str(symbol)

    def __dest__(self):
        assert self.commandType() == "C_COMMAND"
        if "=" in self.current_command:
            temp = self.current_command.split("=")
            return str(temp[:1])
        else:
            return ""

    def __comp__(self):
        assert self.commandType() == "C_COMMAND"
        temp = self.current_command
        if "=" in temp:
            temp = temp.split("=")
            temp = str(temp[1:])
        if ";" in temp:
            temp = temp.split(";")
            temp = str(temp[:1])
        return temp

    def __jump__(self):
        assert self.commandType() == "C_COMMAND"
        if ";" in self.current_command:
            temp = self.current_command.split(";")
            return str(temp[1:])
        else:
            return ""
I really don't know why I'm getting this error. I've looked at the import documentation, but I'm only getting more confused. I'm fairly new to Python. Can anyone explain this error?
Thanks.
Well, there is no method named hasMoreCommands in your Parser module. The method defined there starts and ends with double underscores.
Names with two leading and two trailing underscores are conventionally reserved for Python's special ("magic"/dunder) methods such as __init__; they are not how you define ordinary methods. Because your method is named __hasMoreCommands__, the attribute hasMoreCommands simply does not exist on the object, which is exactly what the AttributeError is telling you.
The following is what you probably want:
def hasMoreCommands(self):
A single leading underscore (def _hasMoreCommands(self):) only marks the method as internal by convention; if you have multiple classes with a method of this name and want name mangling to avoid clashes in subclasses, use two leading underscores and no trailing ones:
def __hasMoreCommands(self):
See: https://stackoverflow.com/a/8689983/2030480
And: http://www.rafekettler.com/magicmethods.html
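For instance, a minimal sketch of the rename, assuming the rest of the class stays as it is (note also that Python's boolean literals are spelled True and False, so return true will fail once the method is reachable):

class AssemblyParser:
    def __init__(self, input_file):
        self.current_command = ""
        self.next_command = ""
        # ... read and clean the input file as before ...

    def hasMoreCommands(self):
        # plain name, so parser.hasMoreCommands() resolves to this method
        return bool(self.next_command)

    def advance(self):
        # likewise rename __advance__, __commandType__, __symbol__, __dest__,
        # __comp__ and __jump__ to advance, commandType, symbol, dest, comp, jump
        ...

With those names, the calling code's parser.hasMoreCommands(), parser.advance(), parser.commandType() and so on will find the methods.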
Related
This is a .py file for a pattern-matching problem:
import sys

def pattern_matching(pattern,genome):
    loc = []
    for i in range(len(genome) - len(pattern) + 1):
        if pattern == genome[i:i+len(pattern)]:
            loc.append(i)
    return loc

if __name__ == '__main__':
    if len(sys.argv) == 2:
        filename = sys.argv[1]
        with open(filename) as f:
            lines = f.read().splitlines()
            pattern = lines[0]
            genome = lines[1]
    else:
        pattern = 'ATAT'
        genome = 'GATATATGCATATACTT'
    loc = pattern_matching(pattern,genome)
    print ",".join(map(str,loc))
But it shows "SyntaxError: invalid syntax" for line 22. How can I print the result?
Converting the result to a list (and calling print as a function) will work:
print(",".join(list(map(str,loc))))
The SyntaxError itself comes from the print line: in Python 2.x print is a statement, but in Python 3.x it is a function and needs parentheses. Also, in Python 2.x map returns a list, while in Python 3.x it returns a map iterator, so you need to convert the result of map to a list if you actually want one.
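For illustration (assuming Python 3), this is the difference in what map returns; note that str.join happily consumes either a list or the map iterator:

loc = [1, 3, 9]
m = map(str, loc)
print(m)                          # e.g. <map object at 0x...>, an iterator, not a list
print(list(m))                    # ['1', '3', '9']
print(",".join(map(str, loc)))    # 1,3,9  (join accepts the iterator directly)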
Fix the unexpected unindent and wrap the print in parentheses if you're on Python 3 or later:
import sys

def pattern_matching(pattern,genome):
    loc = []
    for i in range(len(genome) - len(pattern) + 1):
        if pattern == genome[i:i+len(pattern)]:
            loc.append(i)
    return loc

if __name__ == '__main__':
    if len(sys.argv) == 2:
        filename = sys.argv[1]
        with open(filename) as f:
            lines = f.read().splitlines()
            pattern = lines[0]
            genome = lines[1]
    else:
        pattern = 'ATAT'
        genome = 'GATATATGCATATACTT'
    loc = pattern_matching(pattern,genome)
    print(",".join(map(str,loc)))  # Corrected unindent and encapsulated print
Whatever the code is, I'm just looking for a way to disable several lines at once with the "#" character.
For example, I have this code:
import json

def get(user):
    try:
        with open("tokens.json") as f:
            data_token = json.load(f)
            f.close()
            return str(data_token["tokens"][user])
    except:
        return False

def check(user, token_key):
    with open("tokens.json", 'r') as f:
        data_tokens = json.load(f)
        f.close()
        for ids, tokens in data_tokens["tokens"].items():
            if str(ids) == str(user):
                if int(tokens) == int(token_key):

if __name__ == '__main__':
    print(get("User1"))
And I temporarily want to disable my function named check(), because it raises an error right now:
import json

def get(user):
    try:
        with open("tokens.json") as f:
            data_token = json.load(f)
            f.close()
            return str(data_token["tokens"][user])
    except:
        return False

#def check(user, token_key):
#    with open("tokens.json", 'r') as f:
#        data_tokens = json.load(f)
#        f.close()
#        for ids, tokens in data_tokens["tokens"].items():
#            if str(ids) == str(user):
#                if int(tokens) == int(token_key):

if __name__ == '__main__':
    print(get("User1"))
I've already seen people do that and I wanted to know how it's done.
Thanks ;)
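As an alternative to commenting every line, you can also temporarily replace the function body with a no-op so the file still parses, for example:

def check(user, token_key):
    pass  # temporarily disabled while debugging

but prefixing each line with # as shown above works fine, and most editors offer a block-comment shortcut that toggles # on a whole selection.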
Hello Community Members,
I am getting the error NameError: name 'f' is not defined. The code is below. Please help; any help is appreciated. I have been stuck on this for 3 days. The code extracts all the subcategory names of a Wikipedia category in Python 3.
I have tried both relative and absolute paths.
The code is as follows:
import httplib2
from bs4 import BeautifulSoup
import subprocess
import time, wget
import os, os.path

# declarations
catRoot = "http://en.wikipedia.org/wiki/Category:"
MAX_DEPTH = 100
done = []
ignore = []
path = 'trivial'

# Removes all newline characters and replaces with spaces
def removeNewLines(in_text):
    return in_text.replace('\n', ' ')

# Downloads a link into the destination
def download(link, dest):
    # print link
    if not os.path.exists(dest) or os.path.getsize(dest) == 0:
        subprocess.getoutput('wget "' + link + '" -O "' + dest + '"')
        print ("Downloading")

def ensureDir(f):
    if not os.path.exists(f):
        os.mkdir(f)

# Cleans a text by removing tags
def clean(in_text):
    s_list = list(in_text)
    i,j = 0,0
    while i < len(s_list):
        # iterate until a left-angle bracket is found
        if s_list[i] == '<':
            if s_list[i+1] == 'b' and s_list[i+2] == 'r' and s_list[i+3] == '>':
                i = i + 1
                print ("hello")
                continue
            while s_list[i] != '>':
                # pop everything from the the left-angle bracket until the right-angle bracket
                s_list.pop(i)
            # pops the right-angle bracket, too
            s_list.pop(i)
        elif s_list[i] == '\n':
            s_list.pop(i)
        else:
            i = i + 1
    # convert the list back into text
    join_char = ''
    return (join_char.join(s_list))  # .replace("<br>","\n")

def getBullets(content):
    mainSoup = BeautifulSoup(contents, "html.parser")

# Gets empty bullets
def getAllBullets(content):
    mainSoup = BeautifulSoup(str(content), "html.parser")
    subcategories = mainSoup.findAll('div', attrs={"class": "CategoryTreeItem"})
    empty = []
    full = []
    for x in subcategories:
        subSoup = BeautifulSoup(str(x))
        link = str(subSoup.findAll('a')[0])
        if (str(x)).count("CategoryTreeEmptyBullet") > 0:
            empty.append(clean(link).replace(" ", "_"))
        elif (str(x)).count("CategoryTreeBullet") > 0:
            full.append(clean(link).replace(" ", "_"))
    return ((empty, full))

def printTree(catName, count):
    catName = catName.replace("\\'", "'")
    if count == MAX_DEPTH: return
    download(catRoot + catName, path)
    filepath = "categories/Category:" + catName + ".html"
    print(filepath)
    content = open('filepath', 'w+')
    content.readlines()
    (emptyBullets, fullBullets) = getAllBullets(content)
    f.close()
    for x in emptyBullets:
        for i in range(count):
            print (" "),
        download(catRoot + x, "categories/Category:" + x + ".html")
        print (x)
    for x in fullBullets:
        for i in range(count):
            print (" "),
        print (x)
        if x in done:
            print ("Done... " + x)
            continue
        done.append(x)
        try: printTree(x, count + 1)
        except:
            print ("ERROR: " + x)

name = "Cricket"
printTree(name, 0)
The error encountered is the NameError: name 'f' is not defined mentioned above.
I think f.close() should be content.close().
It's common to use a context manager for such cases, though, like this:
with open(filepath, 'w+') as content:
    (emptyBullets, fullBullets) = getAllBullets(content)
Then Python will close the file for you, even in case of an exception.
(I also changed 'filepath' to filepath, which I assume is the intent here.)
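For example, that part of printTree might then look roughly like this (assuming the file is meant to be read back after the download, hence plain read mode instead of 'w+'):

download(catRoot + catName, path)
filepath = "categories/Category:" + catName + ".html"
with open(filepath, encoding="utf-8") as content:
    (emptyBullets, fullBullets) = getAllBullets(content)
# the with-block closes the file here, so the stray f.close() can simply be dropped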
First of all, I'm new to Python. What I'm trying to do is lemmatize my data from a CSV file; I used pandas to read the CSV.
But while running this I am getting an error on the line lemmatized.append(temp). It says NameError: name 'temp' is not defined.
I can't figure out what is causing this error. I am using Python 2.7.
I would be grateful if any of you Python experts could help me out with this simple problem and help me learn.
data = pd.read_csv('TrainingSETNEGATIVE.csv')
list = data['text'].values

def get_pos_tag(tag):
    if tag.startswith('V'):
        return 'v'
    elif tag.startswith('N'):
        return 'n'
    elif tag.startswith('J'):
        return 'a'
    elif tag.startswith('R'):
        return 'r'
    else:
        return 'n'

lemmatizer = WordNetLemmatizer()

with open('new_file.csv', 'w+', newline='') as myfile:
    wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
    for doc in list:
        tok_doc = nltk.word_tokenize(doc)
        pos_tag_doc = nltk.pos_tag(tok_doc)
        lemmatized = []
        for i in range(len(tok_doc)):
            tag = get_pos_tag(pos_tag_doc[i][1])
            if tag == 'r':
                if tok_doc[i].endswith('ly'):
                    temp = tok_doc[i].replace("ly", "")
            else:
                temp = lemmatizer.lemmatize(tok_doc[i], pos=tag)
            lemmatized.append(temp)
        lemmatized = " ".join(lemmatized)
        wr.writerow([lemmatized])
        print(lemmatized)
The Exception says it all: "name 'temp' is not defined". So the variable temp is not defined before it is used.
The problem with your code is here:
if tag == 'r':
    if tok_doc[i].endswith('ly'):
        temp = tok_doc[i].replace("ly", "")
    # else: temp = None
else:
    temp = lemmatizer.lemmatize(tok_doc[i], pos=tag)
lemmatized.append(temp)
If tag == 'r' is True and tok_doc[i].endswith('ly') is False, then temp never gets defined.
Consider adding an else clause like the one I inserted and commented out.
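A minimal sketch of such a fix (the fallback here just lemmatizes the token like the other branch; whether that is the behaviour you want is your call):

if tag == 'r':
    if tok_doc[i].endswith('ly'):
        temp = tok_doc[i].replace("ly", "")
    else:
        temp = lemmatizer.lemmatize(tok_doc[i], pos=tag)  # fallback so temp is always bound
else:
    temp = lemmatizer.lemmatize(tok_doc[i], pos=tag)
lemmatized.append(temp)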
I've only learnt the basics of Python, so please forgive me, but I was not able to determine the fix from the other posts. I open my JSON files with 'r' and I think I'm writing to them in 'r', but it doesn't like that. Changing it to 'r' doesn't help :(
For the following section:
if isinstance(to_write, list):
    self.log_file.write(''.join(to_write) + "<r/>")
else:
    self.log_file.write(str(to_write) + "<r/>")
self.log_file.flush()
The error I get is: a bytes-like object is required, not 'str'
import math
import time
from random import randint
import json
from instagram.client import InstagramAPI


class Bot:
    def __init__(self, config_file, tags_file):
        # Loading the configuration file, it has the access_token, user_id and others configs
        self.config = json.load(config_file)
        # Loading the tags file, it will be keep up to date while the script is running
        self.tags = json.load(tags_file)
        # Log file to output to html the debugging info about the script
        self.filename = self.config["path"] + self.config["prefix_name"] + time.strftime("%d%m%Y") + ".html"
        self.log_file = open(self.filename, "wb")
        # Initializing the Instagram API with our access token
        self.api = InstagramAPI(access_token=self.config["access_token"], client_secret=self.config['client_secret'])
        # Likes per tag rate
        self.likes_per_tag = math.trunc(min(self.config["follows_per_hour"],
                                            self.config["likes_per_hour"]) / len(self.tags["tags"]))

    def save_tags(self):
        j = json.dumps(self.tags, indent=4)
        f = open('tags.json', 'w')
        print >> f, j
        f.close()

    def insta_write(self, to_write):
        if self.filename != self.config["path"] + self.config["prefix_name"] + time.strftime("%d%m%Y") + ".html":
            self.log_file.close()
            self.filename = self.config["path"] + self.config["prefix_name"] + time.strftime("%d%m%Y") + ".html"
            self.log_file = open(self.filename, "wb")
        if isinstance(to_write, list):
            self.log_file.write(''.join(to_write) + "<r/>")
        else:
            self.log_file.write(str(to_write) + "<r/>")
        self.log_file.flush()

    def going_sleep(self, timer):
        sleep = randint(timer, 2 * timer)
        self.insta_write("SLEEP " + str(sleep))
        time.sleep(sleep)

    def like_and_follow(self, media, likes_for_this_tag):
        try:
            var = self.api.user_relationship(user_id=media.user.id)
            if self.config["my_user_id"] != media.user.id:
                self.insta_write("--------------")
                self.insta_write(var)
                if var.outgoing_status == 'none':
                    self.insta_write("LIKE RESULT:")
                    self.insta_write(self.api.like_media(media_id=media.id))
                    self.insta_write("FOLLOW RESULT:")
                    self.insta_write(self.api.follow_user(user_id=media.user.id))
                    likes_for_this_tag -= 1
                    self.going_sleep(self.config["sleep_timer"])
                else:
                    self.going_sleep(self.config["sleep_timer"] / 2)
        except Exception as e:
            self.insta_write(str(e))
            self.insta_write("GOING SLEEP 30 min")
            time.sleep(1800)
            self.like_and_follow(media, likes_for_this_tag)
        return likes_for_this_tag

    def run(self):
        while True:
            for tag in self.tags["tags"].keys():
                tag = str(tag)
                self.insta_write("--------------------")
                self.insta_write("TAG: " + tag)
                self.insta_write("--------------------")
                self.insta_write("--------------------")
                self.insta_write("DICTIONARY STATUS:")
                for keys, values in self.tags["tags"].items():
                    self.insta_write(keys)
                    if values is not None:
                        self.insta_write(values)
                likes_for_this_tag = self.likes_per_tag
                while likes_for_this_tag > 0 and self.tags["tags"][tag] != 0:
                    if self.tags["tags"][tag] is None:
                        media_tag, self.tags["tags"][tag] = self.api.tag_recent_media(tag_name=tag,
                                                                                      count=likes_for_this_tag)
                    else:
                        media_tag, self.tags["tags"][tag] = self.api.tag_recent_media(tag_name=tag,
                                                                                      count=likes_for_this_tag,
                                                                                      max_tag_id=self.tags["tags"][tag])
                    self.insta_write("API CALL DONE")
                    if len(media_tag) == 0 or self.tags["tags"][tag] is None:
                        self.tags["tags"][tag] = 0
                        likes_for_this_tag = 0
                    else:
                        self.insta_write(self.tags["tags"][tag])
                        self.tags["tags"][tag] = self.tags["tags"][tag].split("&")[-1:][0].split("=")[1]
                        self.save_tags()
                        for m in media_tag:
                            likes_for_this_tag = self.like_and_follow(m, likes_for_this_tag)
            if reduce(lambda r, h: r and h[1] == 0, self.tags["tags"].items(), True):
                self.insta_write("END")
                exit(1)


if __name__ == '__main__':
    bot = Bot(open("config_bot.json", "r"), open("tags.json", "r"))
    bot.run()
You opened the file as binary:
self.log_file = open(self.filename, "wb")
but you are writing str Unicode strings to it. Either open the file in text mode (with an encoding set) or encode each string separately.
Opening the file in text mode is easiest:
self.log_file = open(self.filename, "w", encoding="utf8")
In my case, the reason for the error was a conflict between the json.load function and a function named load imported from another module. Explicitly specifying which load function to use, i.e. json.load, solved the problem.
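For illustration, a hypothetical version of that kind of clash (the pickle import is just an example of another module that also exports a load function):

from pickle import load   # a bare name 'load' is now in scope
import json

with open("config_bot.json") as f:
    cfg = load(f)          # this calls pickle.load, not the intended json.load
with open("config_bot.json") as f:
    cfg = json.load(f)     # the explicit module prefix removes the ambiguity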