How to debug "NameError: global name 'X' is not defined" in Python? I am pretty new to Python. I am using a Jupyter notebook with Python 2.7 to execute code, and I am facing the following error.
My code:
logFile = "NASAlog.txt"

def parseLogs():
    parsed_logs = (sc
                   .textFile(logFile)
                   .map(parseApacheLogLine)
                   .cache())

    access_logs = (parsed_logs
                   .filter(lambda s: s[1] == 1)
                   .map(lambda s: s[0])
                   .cache())

    failed_logs = (parsed_logs
                   .filter(lambda s: s[1] == 0)
                   .map(lambda s: s[0]))

    failed_logs_count = failed_logs.count()
    if failed_logs_count > 0:
        print 'Number of invalid logline: %d' % failed_logs.count()
        for line in failed_logs.take(20):
            print 'Invalid logline: %s' % line

    print 'Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (parsed_logs.count(), access_logs.count(), failed_logs.count())
    return parsed_logs, access_logs, failed_logs

parsed_logs, access_logs, failed_logs = parseLogs()
ERROR
> NameError Traceback (most recent call last)
> <ipython-input-18-b365aa793252> in <module>()
> 24 return parsed_logs, access_logs, failed_logs
> 25
> ---> 26 parsed_logs, access_logs, failed_logs = parseLogs()
>
> <ipython-input-18-b365aa793252> in parseLogs()
> 2
> 3 def parseLogs():
> ----> 4 parsed_logs=(sc
> 5 .textFile(logFile)
> 6 .map(parseApacheLogLine)
>
> NameError: global name 'sc' is not defined
The problem is that you never defined sc, so Python can't find it. (Makes sense, doesn't it?)
Now there are several possible reasons:
- Python is case-sensitive. Did you define SC somewhere instead of sc? Or Sc instead of sc?
- You defined sc in another function (i.e. in a function other than parseLogs()). If you only define it there, the variable is local and available only to the code inside that function. Add the line global sc at the top of that function to make it accessible everywhere in your code.
- You simply did not define sc.
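In this particular case the missing name is almost certainly the SparkContext that Spark-enabled notebooks predefine as sc. A minimal sketch, assuming you are running a plain Jupyter kernel with PySpark installed (the app name here is arbitrary):

from pyspark import SparkContext

# Create the SparkContext yourself; Spark-aware notebook kernels
# normally predefine this as `sc`, but a plain Jupyter kernel does not.
sc = SparkContext(appName="NASALogParser")

parsed_logs, access_logs, failed_logs = parseLogs()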
I've been trying to use the IDAPython API to adjust the compiler settings in a script, but I can't seem to get any function to work properly. Some of the things I've attempted:
1.
Python>SetLongPrm(INF_COMPILER, COMP_MS)
This leaves the compiler ID set to the right value, but for some reason it sets all the other compiler-related values to 0 or something similar, giving me errors about the pointer size not being right and the int size not being a valid value.
2.
Python>idaapi.set_compiler_id(2)
False
This just straight up doesn't work, but this would probably end up the same as the first command.
3.
class compiler_info_t(object):
    id = COMP_MS
    cm = 0x3 | 0x00 | 0x30
    size_i = 4
    size_b = 1
    size_e = 4
    defalign = 0
    size_s = 2
    size_l = 4
    size_ll = 8

    def __init__(self, *args):
        """
        __init__(self) -> compiler_info_t
        """
        this = _idaapi.new_compiler_info_t(*args)
        try: self.this.append(this)
        except: self.this = this
My last attempt was to make my own compiler_info_t object to pass to idaapi.set_compiler(), but since _idaapi isn't a module I can import normally, it won't let me call new_compiler_info_t().
Question:
Is there a way to, perhaps, individually set/fix the compiler values for pointer size, memory model, and calling convention?
If not, is there a different way to completely adjust the compiler, analogous to how it would function if you changed it by hand in the compiler settings window?
Here is my example: "Set Compiler defaults for Visual C++"
def print_compiler(id):
    print '-' * 80
    abbr = ida_typeinf.get_compiler_abbr(id)
    name = ida_typeinf.get_compiler_name(id)
    print "id: %d (%s)" % (id, abbr)
    print "Compiler: '%s'" % name
    im = idc.get_inf_attr(INF_COMPILER)
    print "Calling model: %02X" % im.cm
    print "Default alignment: %d" % im.defalign
    print "sizeof(int): %d\tsizeof(short): %d" % (im.size_i, im.size_s)
    print "sizeof(bool): %d\tsizeof(long): %d" % (im.size_b, im.size_l)
    print "sizeof(enum): %d\tsizeof(longlong): %d" % (im.size_e, im.size_ll)
    print "sizeof(long double): %d" % im.size_ldbl
    print "Predefined macros: '%s'" % ida_idp.cfg_get_cc_predefined_macros(id)
    print "Included directories: '%s'" % ida_idp.cfg_get_cc_header_path(id)
    print '-' * 80

# Print old compiler settings by ID
print_compiler(idc.get_inf_attr(INF_COMPILER).id)

# Set compiler defaults for Visual C++
im = idc.get_inf_attr(INF_COMPILER)  # Get current settings
im.id = ida_typeinf.COMP_MS
im.cm = 0x03 | 0x00 | 0x30
im.defalign = 0
im.size_i = 4
im.size_b = 1
im.size_e = 4
im.size_s = 2
im.size_l = 4
im.size_ll = 8
im.size_ldbl = 8

# Replace predefined macros and included directories by id
# from IDA.CFG (see 'CC_PARMS' in Built-in C parser parameters)
ida_typeinf.set_c_macros(ida_idp.cfg_get_cc_predefined_macros(im.id))
ida_typeinf.set_c_header_path(ida_idp.cfg_get_cc_header_path(im.id))

# Apply the new settings :)
idc.set_inf_attr(INF_COMPILER, im.id)

# Print new compiler settings by ID
print_compiler(im.id)
The code below is mostly from a MyHDL sample:
from myhdl import Signal, intbv, delay, always, now, Simulation, toVerilog

__debug = True

def ClkDriver(clk):
    halfPeriod = delay(10)

    @always(halfPeriod)
    def driveClk():
        clk.next = not clk

    return driveClk

def HelloWorld(clk, outs):
    counts = intbv(3)[32:]

    @always(clk.posedge)
    def sayHello():
        outs.next = not outs
        if counts >= 3 - 1:
            counts.next = 0
        else:
            counts.next = counts + 1
        if __debug__:
            print "%s Hello World! outs %s %s" % (
                now(), str(outs), str(outs.next))

    return sayHello

clk = Signal(bool(0))
outs = Signal(intbv(0)[1:])
clkdriver_inst = ClkDriver(clk)
hello_inst = toVerilog(HelloWorld, clk, outs)
sim = Simulation(clkdriver_inst, hello_inst)
sim.run(150)
I expect it to generate a verilog program that contains an initial block, like something:
module HelloWorld(...)
    reg [31:0] counts;

    initial begin
        counts = 32'h3
    end

    always #(...
How can you get the initial block generated?
Note that the Google cache of old.myhdl.org/doku.php/dev:initial_values links to the example https://bitbucket.org/cfelton/examples/src/tip/ramrom/ , so it looks like the feature should be supported. However, the ROM sample generates static case statements, which is not what I'm looking for.
Three steps to resolve it:
1. Update to the latest myhdl on master, or a version that contains the hash 87784ad, which added the feature under issue #105 or #150. As an example for a virtualenv, run a git clone followed by pip install -e <path-to-myhdl-dir>.
2. Change the signal to a list.
3. Set toVerilog.initial_values = True before calling toVerilog.
Code snippet follows.
def HelloWorld(clk, outs):
    counts = [Signal(intbv(3)[32:])]

    @always(clk.posedge)
    def sayHello():
        outs.next = not outs
        if counts[0] >= 3 - 1:
            counts[0].next = 0
        else:
            counts[0].next = counts[0] + 1
        if __debug__:
            print "%s Hello World! outs %s %s %d" % (
                now(), str(outs), str(outs.next), counts[0])

    return sayHello

clk = Signal(bool(0))
outs = Signal(intbv(0)[1:])
clkdriver_inst = ClkDriver(clk)
toVerilog.initial_values = True
hello_inst = toVerilog(HelloWorld, clk, outs)
sim = Simulation(clkdriver_inst, hello_inst)
sim.run(150)
I've recently become interested in algorithms and have begun exploring them by writing a naive implementation and then optimizing it in various ways.
I'm already familiar with the standard Python module for profiling runtime (for most things I've found the timeit magic function in IPython to be sufficient), but I'm also interested in memory usage so I can explore those tradeoffs as well (e.g. the cost of caching a table of previously computed values versus recomputing them as needed). Is there a module that will profile the memory usage of a given function for me?
Python 3.4 includes a new module: tracemalloc. It provides detailed statistics about which code is allocating the most memory. Here's an example that displays the top three lines allocating memory.
from collections import Counter
import linecache
import os
import tracemalloc

def display_top(snapshot, key_type='lineno', limit=3):
    snapshot = snapshot.filter_traces((
        tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
        tracemalloc.Filter(False, "<unknown>"),
    ))
    top_stats = snapshot.statistics(key_type)

    print("Top %s lines" % limit)
    for index, stat in enumerate(top_stats[:limit], 1):
        frame = stat.traceback[0]
        # replace "/path/to/module/file.py" with "module/file.py"
        filename = os.sep.join(frame.filename.split(os.sep)[-2:])
        print("#%s: %s:%s: %.1f KiB"
              % (index, filename, frame.lineno, stat.size / 1024))
        line = linecache.getline(frame.filename, frame.lineno).strip()
        if line:
            print('    %s' % line)

    other = top_stats[limit:]
    if other:
        size = sum(stat.size for stat in other)
        print("%s other: %.1f KiB" % (len(other), size / 1024))
    total = sum(stat.size for stat in top_stats)
    print("Total allocated size: %.1f KiB" % (total / 1024))

tracemalloc.start()

counts = Counter()
fname = '/usr/share/dict/american-english'
with open(fname) as words:
    words = list(words)
    for word in words:
        prefix = word[:3]
        counts[prefix] += 1

print('Top prefixes:', counts.most_common(3))
snapshot = tracemalloc.take_snapshot()
display_top(snapshot)
And here are the results:
Top prefixes: [('con', 1220), ('dis', 1002), ('pro', 809)]
Top 3 lines
#1: scratches/memory_test.py:37: 6527.1 KiB
words = list(words)
#2: scratches/memory_test.py:39: 247.7 KiB
prefix = word[:3]
#3: scratches/memory_test.py:40: 193.0 KiB
counts[prefix] += 1
4 other: 4.3 KiB
Total allocated size: 6972.1 KiB
When is a memory leak not a leak?
That example is great when the memory is still being held at the end of the calculation, but sometimes you have code that allocates a lot of memory and then releases it all. It's not technically a memory leak, but it's using more memory than you think it should. How can you track memory usage when it all gets released? If it's your code, you can probably add some debugging code to take snapshots while it's running. If not, you can start a background thread to monitor memory usage while the main thread runs.
Here's the previous example where the code has all been moved into the count_prefixes() function. When that function returns, all the memory is released. I also added some sleep() calls to simulate a long-running calculation.
from collections import Counter
import linecache
import os
import tracemalloc
from time import sleep

def count_prefixes():
    sleep(2)  # Start up time.
    counts = Counter()
    fname = '/usr/share/dict/american-english'
    with open(fname) as words:
        words = list(words)
        for word in words:
            prefix = word[:3]
            counts[prefix] += 1
            sleep(0.0001)
    most_common = counts.most_common(3)
    sleep(3)  # Shut down time.
    return most_common

def main():
    tracemalloc.start()
    most_common = count_prefixes()
    print('Top prefixes:', most_common)
    snapshot = tracemalloc.take_snapshot()
    display_top(snapshot)

def display_top(snapshot, key_type='lineno', limit=3):
    snapshot = snapshot.filter_traces((
        tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
        tracemalloc.Filter(False, "<unknown>"),
    ))
    top_stats = snapshot.statistics(key_type)

    print("Top %s lines" % limit)
    for index, stat in enumerate(top_stats[:limit], 1):
        frame = stat.traceback[0]
        # replace "/path/to/module/file.py" with "module/file.py"
        filename = os.sep.join(frame.filename.split(os.sep)[-2:])
        print("#%s: %s:%s: %.1f KiB"
              % (index, filename, frame.lineno, stat.size / 1024))
        line = linecache.getline(frame.filename, frame.lineno).strip()
        if line:
            print('    %s' % line)

    other = top_stats[limit:]
    if other:
        size = sum(stat.size for stat in other)
        print("%s other: %.1f KiB" % (len(other), size / 1024))
    total = sum(stat.size for stat in top_stats)
    print("Total allocated size: %.1f KiB" % (total / 1024))

main()
When I run that version, the memory usage has gone from 6MB down to 4KB, because the function released all its memory when it finished.
Top prefixes: [('con', 1220), ('dis', 1002), ('pro', 809)]
Top 3 lines
#1: collections/__init__.py:537: 0.7 KiB
self.update(*args, **kwds)
#2: collections/__init__.py:555: 0.6 KiB
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
#3: python3.6/heapq.py:569: 0.5 KiB
result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)]
10 other: 2.2 KiB
Total allocated size: 4.0 KiB
Now here's a version inspired by another answer that starts a second thread to monitor memory usage.
from collections import Counter
import linecache
import os
import tracemalloc
from datetime import datetime
from queue import Queue, Empty
from resource import getrusage, RUSAGE_SELF
from threading import Thread
from time import sleep

def memory_monitor(command_queue: Queue, poll_interval=1):
    tracemalloc.start()
    old_max = 0
    snapshot = None
    while True:
        try:
            command_queue.get(timeout=poll_interval)
            if snapshot is not None:
                print(datetime.now())
                display_top(snapshot)
            return
        except Empty:
            max_rss = getrusage(RUSAGE_SELF).ru_maxrss
            if max_rss > old_max:
                old_max = max_rss
                snapshot = tracemalloc.take_snapshot()
                print(datetime.now(), 'max RSS', max_rss)

def count_prefixes():
    sleep(2)  # Start up time.
    counts = Counter()
    fname = '/usr/share/dict/american-english'
    with open(fname) as words:
        words = list(words)
        for word in words:
            prefix = word[:3]
            counts[prefix] += 1
            sleep(0.0001)
    most_common = counts.most_common(3)
    sleep(3)  # Shut down time.
    return most_common

def main():
    queue = Queue()
    poll_interval = 0.1
    monitor_thread = Thread(target=memory_monitor, args=(queue, poll_interval))
    monitor_thread.start()
    try:
        most_common = count_prefixes()
        print('Top prefixes:', most_common)
    finally:
        queue.put('stop')
        monitor_thread.join()

def display_top(snapshot, key_type='lineno', limit=3):
    snapshot = snapshot.filter_traces((
        tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
        tracemalloc.Filter(False, "<unknown>"),
    ))
    top_stats = snapshot.statistics(key_type)

    print("Top %s lines" % limit)
    for index, stat in enumerate(top_stats[:limit], 1):
        frame = stat.traceback[0]
        # replace "/path/to/module/file.py" with "module/file.py"
        filename = os.sep.join(frame.filename.split(os.sep)[-2:])
        print("#%s: %s:%s: %.1f KiB"
              % (index, filename, frame.lineno, stat.size / 1024))
        line = linecache.getline(frame.filename, frame.lineno).strip()
        if line:
            print('    %s' % line)

    other = top_stats[limit:]
    if other:
        size = sum(stat.size for stat in other)
        print("%s other: %.1f KiB" % (len(other), size / 1024))
    total = sum(stat.size for stat in top_stats)
    print("Total allocated size: %.1f KiB" % (total / 1024))

main()
The resource module lets you check the current memory usage, and save the snapshot from the peak memory usage. The queue lets the main thread tell the memory monitor thread when to print its report and shut down. When it runs, it shows the memory being used by the list() call:
2018-05-29 10:34:34.441334 max RSS 10188
2018-05-29 10:34:36.475707 max RSS 23588
2018-05-29 10:34:36.616524 max RSS 38104
2018-05-29 10:34:36.772978 max RSS 45924
2018-05-29 10:34:36.929688 max RSS 46824
2018-05-29 10:34:37.087554 max RSS 46852
Top prefixes: [('con', 1220), ('dis', 1002), ('pro', 809)]
2018-05-29 10:34:56.281262
Top 3 lines
#1: scratches/scratch.py:36: 6527.0 KiB
words = list(words)
#2: scratches/scratch.py:38: 16.4 KiB
prefix = word[:3]
#3: scratches/scratch.py:39: 10.1 KiB
counts[prefix] += 1
19 other: 10.8 KiB
Total allocated size: 6564.3 KiB
If you're on Linux, you may find /proc/self/statm more useful than the resource module.
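For instance, here's a minimal sketch of reading the resident set size from /proc/self/statm (the field layout is documented in proc(5); values are in pages, so they are converted using the system page size):

import os

def statm_rss_kib():
    # /proc/self/statm fields: size resident shared text lib data dt (in pages)
    with open('/proc/self/statm') as f:
        resident_pages = int(f.read().split()[1])
    page_size = os.sysconf('SC_PAGE_SIZE')  # bytes per page
    return resident_pages * page_size / 1024.0  # RSS in KiB

print('RSS: %.1f KiB' % statm_rss_kib())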
This one has been answered already here: Python memory profiler
Basically you do something like this (cited from Guppy-PE):
>>> from guppy import hpy; h=hpy()
>>> h.heap()
Partition of a set of 48477 objects. Total size = 3265516 bytes.
Index Count % Size % Cumulative % Kind (class / dict of class)
0 25773 53 1612820 49 1612820 49 str
1 11699 24 483960 15 2096780 64 tuple
2 174 0 241584 7 2338364 72 dict of module
3 3478 7 222592 7 2560956 78 types.CodeType
4 3296 7 184576 6 2745532 84 function
5 401 1 175112 5 2920644 89 dict of class
6 108 0 81888 3 3002532 92 dict (no owner)
7 114 0 79632 2 3082164 94 dict of type
8 117 0 51336 2 3133500 96 type
9 667 1 24012 1 3157512 97 __builtin__.wrapper_descriptor
<76 more rows. Type e.g. '_.more' to view.>
>>> h.iso(1,[],{})
Partition of a set of 3 objects. Total size = 176 bytes.
Index Count % Size % Cumulative % Kind (class / dict of class)
0 1 33 136 77 136 77 dict (no owner)
1 1 33 28 16 164 93 list
2 1 33 12 7 176 100 int
>>> x=[]
>>> h.iso(x).sp
0: h.Root.i0_modules['__main__'].__dict__['x']
>>>
If you only want to look at the memory usage of a single object (this answers a different question), there is a module called Pympler which contains the asizeof module. Use it as follows:
from pympler import asizeof
asizeof.asizeof(my_object)
Unlike sys.getsizeof, it works for your self-created objects.
>>> asizeof.asizeof(tuple('bcd'))
200
>>> asizeof.asizeof({'foo': 'bar', 'baz': 'bar'})
400
>>> asizeof.asizeof({})
280
>>> asizeof.asizeof({'foo':'bar'})
360
>>> asizeof.asizeof('foo')
40
>>> asizeof.asizeof(Bar())
352
>>> asizeof.asizeof(Bar().__dict__)
280
>>> help(asizeof.asizeof)
Help on function asizeof in module pympler.asizeof:
asizeof(*objs, **opts)
Return the combined size in bytes of all objects passed as positional arguments.
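As a sketch of how that differs from sys.getsizeof (the printed numbers will vary by Python version and platform): sys.getsizeof reports only the container object's own size, while asizeof recurses into everything it references.

import sys
from pympler import asizeof

nested = {'a': [1, 2, 3], 'b': 'x' * 100}
print(sys.getsizeof(nested))    # size of the dict object itself only
print(asizeof.asizeof(nested))  # dict plus the list, ints and string it references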
Disclosure:
- Applicable on Linux only.
- Reports memory used by the current process as a whole, not individual functions within it.

But nice because of its simplicity:
import resource

def using(point=""):
    usage = resource.getrusage(resource.RUSAGE_SELF)
    return '''%s: usertime=%s systime=%s mem=%s mb
''' % (point, usage[0], usage[1],
       usage[2] / 1024.0)
Just insert using("Label") where you want to see what's going on. For example
print(using("before"))
wrk = ["wasting mem"] * 1000000
print(using("after"))
>>> before: usertime=2.117053 systime=1.703466 mem=53.97265625 mb
>>> after: usertime=2.12023 systime=1.70708 mem=60.8828125 mb
Below is a simple function decorator which lets you track how much memory the process consumed before the function call, how much after, and the difference:
import time
import os
import psutil

def elapsed_since(start):
    return time.strftime("%H:%M:%S", time.gmtime(time.time() - start))

def get_process_memory():
    process = psutil.Process(os.getpid())
    mem_info = process.memory_info()
    return mem_info.rss

def profile(func):
    def wrapper(*args, **kwargs):
        mem_before = get_process_memory()
        start = time.time()
        result = func(*args, **kwargs)
        elapsed_time = elapsed_since(start)
        mem_after = get_process_memory()
        print("{}: memory before: {:,}, after: {:,}, consumed: {:,}; exec time: {}".format(
            func.__name__,
            mem_before, mem_after, mem_after - mem_before,
            elapsed_time))
        return result
    return wrapper
Here is my blog which describes all the details. (archived link)
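A hypothetical usage sketch (build_list and the printed numbers are made up for illustration; real values will vary from run to run):

@profile
def build_list(n):
    # Allocate a noticeable amount of memory.
    return [i * i for i in range(n)]

build_list(1000000)
# build_list: memory before: 20,180,992, after: 60,289,024, consumed: 40,108,032; exec time: 00:00:00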
Since the accepted answer and also the next highest voted answer have, in my opinion, some problems, I'd like to offer one more answer that is based closely on Ihor B.'s answer with some small but important modifications.
This solution lets you run profiling either by wrapping a function call with the profile function and calling the result, or by decorating your function/method with the @profile decorator.
The first technique is useful when you want to profile some third-party code without messing with its source, whereas the second technique is a bit "cleaner" and works better when you don't mind modifying the source of the function/method you want to profile.
I've also modified the output, so that you get RSS, VMS, and shared memory. I don't care much about the "before" and "after" values, but only the delta, so I removed those (if you're comparing to Ihor B.'s answer).
Profiling code
# profile.py
import time
import os
import psutil
import inspect

def elapsed_since(start):
    #return time.strftime("%H:%M:%S", time.gmtime(time.time() - start))
    elapsed = time.time() - start
    if elapsed < 1:
        return str(round(elapsed * 1000, 2)) + "ms"
    if elapsed < 60:
        return str(round(elapsed, 2)) + "s"
    if elapsed < 3600:
        return str(round(elapsed / 60, 2)) + "min"
    else:
        return str(round(elapsed / 3600, 2)) + "hrs"

def get_process_memory():
    process = psutil.Process(os.getpid())
    mi = process.memory_info()
    return mi.rss, mi.vms, mi.shared

def format_bytes(bytes):
    if abs(bytes) < 1000:
        return str(bytes) + "B"
    elif abs(bytes) < 1e6:
        return str(round(bytes / 1e3, 2)) + "kB"
    elif abs(bytes) < 1e9:
        return str(round(bytes / 1e6, 2)) + "MB"
    else:
        return str(round(bytes / 1e9, 2)) + "GB"

def profile(func, *args, **kwargs):
    def wrapper(*args, **kwargs):
        rss_before, vms_before, shared_before = get_process_memory()
        start = time.time()
        result = func(*args, **kwargs)
        elapsed_time = elapsed_since(start)
        rss_after, vms_after, shared_after = get_process_memory()
        print("Profiling: {:>20} RSS: {:>8} | VMS: {:>8} | SHR {"
              ":>8} | time: {:>8}"
              .format("<" + func.__name__ + ">",
                      format_bytes(rss_after - rss_before),
                      format_bytes(vms_after - vms_before),
                      format_bytes(shared_after - shared_before),
                      elapsed_time))
        return result
    if inspect.isfunction(func):
        return wrapper
    elif inspect.ismethod(func):
        return wrapper(*args, **kwargs)
Example usage, assuming the above code is saved as profile.py:
from profile import profile
from time import sleep
from sklearn import datasets  # Just an example of a 3rd party function call

# Method 1
run_profiling = profile(datasets.load_digits)
data = run_profiling()

# Method 2
@profile
def my_function():
    # do some stuff
    a_list = []
    for i in range(1, 100000):
        a_list.append(i)
    return a_list

res = my_function()
This should result in output similar to the below:
Profiling: <load_digits> RSS: 5.07MB | VMS: 4.91MB | SHR 73.73kB | time: 89.99ms
Profiling: <my_function> RSS: 1.06MB | VMS: 1.35MB | SHR 0B | time: 8.43ms
A couple of important final notes:

- Keep in mind that this method of profiling is only going to be approximate, since lots of other stuff might be happening on the machine. Due to garbage collection and other factors, the deltas might even be zero.
- For some unknown reason, very short function calls (e.g. 1 or 2 ms) show up with zero memory usage. I suspect this is some limitation of the hardware/OS (tested on a basic laptop with Linux) on how often memory statistics are updated.
- To keep the examples simple, I didn't use any function arguments, but they should work as one would expect, i.e. profile(my_function, arg) to profile my_function(arg).
A simple example that calculates the memory usage of a block of code / a function using memory_profiler, while returning the result of the function:
import memory_profiler as mp

def fun(n):
    tmp = []
    for i in range(n):
        tmp.extend(list(range(i * i)))
    return "XXXXX"
Calculate the memory usage before running the code, then the maximum usage during the code:
start_mem = mp.memory_usage(max_usage=True)
res = mp.memory_usage(proc=(fun, [100]), max_usage=True, retval=True)
print('start mem', start_mem)
print('max mem', res[0][0])
print('used mem', res[0][0]-start_mem)
print('fun output', res[1])
Calculate usage at sampling points while running the function:
res = mp.memory_usage((fun, [100]), interval=.001, retval=True)
print('min mem', min(res[0]))
print('max mem', max(res[0]))
print('used mem', max(res[0])-min(res[0]))
print('fun output', res[1])
Credits: @skeept
Maybe this helps:
pip install gprof2dot
sudo apt-get install graphviz
gprof2dot -f pstats profile_for_func1_001 | dot -Tpng -o profile.png
import cProfile

def profileit(name):
    """
    Usage: @profileit("profile_for_func1_001")
    """
    def inner(func):
        def wrapper(*args, **kwargs):
            prof = cProfile.Profile()
            retval = prof.runcall(func, *args, **kwargs)
            # Note use of name from outer scope
            prof.dump_stats(name)
            return retval
        return wrapper
    return inner

@profileit("profile_for_func1_001")
def func1(...)
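If you'd rather inspect the dump in Python than render a graph, the stdlib pstats module can read the same file; a minimal sketch (the filename matches the decorator argument above):

import pstats

stats = pstats.Stats("profile_for_func1_001")   # file written by prof.dump_stats(...)
stats.sort_stats("cumulative").print_stats(10)  # top 10 entries by cumulative time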
I have a Pi Model A, running the latest version of Raspbian. Plugged into it is an ADC-Pi (https://www.abelectronics.co.uk/products/3/Raspberry-Pi/17/ADC-Pi-V2---Raspberry-Pi-Analogue-to-Digital-converter) with various analog sensors.
Running the demo code (which is below):
- If I use ./adc_demo.py, it works fine.
- If I use sudo python3 adc_demo.py, I get the error 'ImportError: No module named quick2wire.i2c'.

What can I do so I can run it with the latter command? I have another script that runs a motor through the GPIO pins on the Pi, which needs to be run as root, and I'm trying to merge the two scripts together.
adc_demo.py
#!/usr/bin/env python3
# read abelectronics ADC Pi board inputs
# uses quick2wire from http://quick2wire.com/
# See http://elinux.org/RPi_ADC_I2C_Python for full setup instructions
import quick2wire.i2c as i2c
import re
import time

adc_address1 = 0x68
adc_address2 = 0x69

adc_channel1 = 0x98
adc_channel2 = 0xB8
adc_channel3 = 0xD8
adc_channel4 = 0xF8

for line in open('/proc/cpuinfo').readlines():
    m = re.match('(.*?)\s*:\s*(.*)', line)
    if m:
        (name, value) = (m.group(1), m.group(2))
        if name == "Revision":
            if value[-4:] in ('0002', '0003'):
                i2c_bus = 0
            else:
                i2c_bus = 1
            break

with i2c.I2CMaster(i2c_bus) as bus:

    def getadcreading(address, channel):
        bus.transaction(i2c.writing_bytes(address, channel))
        time.sleep(0.05)
        h, l, r = bus.transaction(i2c.reading(address, 3))[0]
        time.sleep(0.05)
        h, l, r = bus.transaction(i2c.reading(address, 3))[0]
        t = (h << 8) | l
        v = t * 0.000154
        if v < 5.5:
            return v
        else:  # must be a floating input
            return 0.00

    while True:
        print("1: %f" % getadcreading(adc_address1, adc_channel1))
        print("2: %f" % getadcreading(adc_address1, adc_channel2))
        print("3: %f" % getadcreading(adc_address1, adc_channel3))
        print("4: %f" % getadcreading(adc_address1, adc_channel4))
        print("5: %f" % getadcreading(adc_address2, adc_channel1))
        print("6: %f" % getadcreading(adc_address2, adc_channel2))
        print("7: %f" % getadcreading(adc_address2, adc_channel3))
        print("8: %f" % getadcreading(adc_address2, adc_channel4))
        time.sleep(1)