Please see the code and the error at the end.
from brisa.core.reactors import install_default_reactor
reactor = install_default_reactor()
print reactor

import os

from brisa.upnp.device import Device, Service
from brisa.upnp.device.service import StateVariable


class BinaryLight(Device):
    def __init__(self):
        Device.__init__(self,
                        'urn:schemas-upnp-org:device:BinaryLight:1',
                        'Binary Light device')


class SwitchPower(Service):
    def __init__(self):
        Service.__init__(self,
                         'SwitchPower',
                         'urn:schemas-upnp-org:service:SwitchPower:1',
                         '',
                         os.getcwd() + '/SwitchPower-scpd.xml')
        self.target = False
        self.status = False
        self.varin = StateVariable(self, "Status",
                                   True, False, "boolean")
        self.varin.subscribe_for_update(self.varUpdateCallback)
        self.add_state_variable(self.varin)

    def varUpdateCallback(self, name, value):
        print name, 'was updated to', value

    def SetTarget(self, *args, **kwargs):
        self.target = kwargs['NewTargetValue']
        self.status = self.target
        self.set_state_variable('Status', self.target)
        print 'Light switched ', {'1': 'on', '0': 'off'}.get(self.target, None)
        return {}

    def GetTarget(self, *args, **kwargs):
        return {'RetTargetValue': self.target}

    def soap_GetStatus(self, *args, **kwargs):
        return {'ResultStatus': self.status}


if __name__ == '__main__':
    device = BinaryLight()
    device += SwitchPower()

    # Start device
    device.start()

    # Setup main loop
    reactor.add_after_stop_func(device.stop)
    reactor.main()
I am getting an error when I run it:
ankit@ubuntu:~/Desktop$ python binary_light.py
Could you please tell me where I am making a mistake?
Error:
Traceback (most recent call last):
File "binary_light.py", line 8, in <module>
from brisa.upnp.device import Device, Service
File "/usr/local/lib/python2.6/dist-packages/brisa/upnp/device/__init__.py", line 8, in <module>
from brisa.upnp.device.device import Device
File "/usr/local/lib/python2.6/dist-packages/brisa/upnp/device/device.py", line 10, in <module>
from brisa.core import log, config, webserver, network
File "/usr/local/lib/python2.6/dist-packages/brisa/core/webserver.py", line 39, in <module>
raise RuntimeError('Network is down.')
RuntimeError: Network is down.
NEW ERROR:
Traceback (most recent call last):
File "binary_light.py", line 57, in <module>
device = BinaryLight()
File "binary_light.py", line 21, in __init__
'Binary Light device')
File "/usr/local/lib/python2.6/dist-packages/brisa/upnp/device/device.py", line 83, in __init__
additional_headers=additional_ssdp_headers)
File "/usr/local/lib/python2.6/dist-packages/brisa/upnp/ssdp.py", line 71, in __init__
data_callback=self._datagram_received)
File "/usr/local/lib/python2.6/dist-packages/brisa/core/network_listeners.py", line 188, in __init__
self._create_socket(shared_socket)
File "/usr/local/lib/python2.6/dist-packages/brisa/core/network_listeners.py", line 227, in _create_socket
"Couldn't bind address")
brisa.core.network_listeners.CannotListenError
Please let me know where I am making a mistake?
As others have mentioned, that's not an error. However, my guess would be that you have an error and something's going wrong somewhere or you wouldn't have bothered posting here. Try removing the print statement. Then let us know if you get any more "errors".
This is not an error. On line 3 you are printing the object "reactor".
(In response to updated question)
Well, the error suggests it can't find the network.
Looking at the code, try doing this at the start of your script:
import brisa
brisa.__enable_offline_mode__ = True
from brisa.core.reactors import install_default_reactor
Related
I want to run parallel processing using a class, but the code gives this error:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\MonPc\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "C:\Users\MonPc\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 115, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
This is similar to the original code that I want to work on.
My code:
from multiprocessing import Process


class class1:
    def __init__(self):
        super().__init__()
        txt = "Rachid"
        p1 = class2(txt)
        p1.start()
        p1.join()


class class2(Process):
    def __init__(self, txt):
        Process.__init__(self)
        self.txt = txt

    def run(self):
        print("*" * 10)
        print(self.txt)
        print("*" * 10)


class1()
How can I avoid getting this error?
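One common cause of this EOFError with the "spawn" start method (the default on Windows) is creating processes at import time: the spawned child re-imports the main module, so any code that starts processes needs to sit behind an if __name__ == '__main__': guard. A minimal sketch of that rearrangement, using the question's own classes (the guard is my suggestion, not a confirmed fix for this exact setup):

from multiprocessing import Process


class class2(Process):
    def __init__(self, txt):
        Process.__init__(self)
        self.txt = txt

    def run(self):
        print("*" * 10)
        print(self.txt)
        print("*" * 10)


class class1:
    def __init__(self):
        super().__init__()
        txt = "Rachid"
        p1 = class2(txt)
        p1.start()
        p1.join()


# Guard the process-creating call so the spawned child, which re-imports
# this module, does not try to start new processes again during import.
if __name__ == '__main__':
    class1()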
I need to convert this:
from qgis.core import QgsProcessing
from qgis.core import QgsProcessingAlgorithm
from qgis.core import QgsProcessingMultiStepFeedback
from qgis.core import QgsProcessingParameterVectorLayer
from qgis.core import QgsProcessingParameterFeatureSink
import processing


class Modelo(QgsProcessingAlgorithm):
    def initAlgorithm(self, config=None):
        self.addParameter(QgsProcessingParameterVectorLayer('inpput', 'input_file.shp', defaultValue=None))
        self.addParameter(QgsProcessingParameterFeatureSink('output', 'UF_extraido.shp', type=QgsProcessing.TypeVectorAnyGeometry, createByDefault=True, defaultValue=None))

    def processAlgorithm(self, parameters, context, model_feedback):
        # Use a multi-step feedback, so that individual child algorithm progress
        # reports are adjusted for the overall progress through the model
        feedback = QgsProcessingMultiStepFeedback(1, model_feedback)
        results = {}
        outputs = {}

        # Extract by expression ("Extrair por expressão")
        alg_params = {
            'EXPRESSION': '\"S_majority\" = 1',
            'INPUT': parameters['input'],
            'OUTPUT': parameters['output']
        }
        outputs['ExtrairPorExpresso'] = processing.run('native:extractbyexpression', alg_params, context=context, feedback=feedback, is_child_algorithm=True)
        results['output'] = outputs['ExtrairPorExpresso']['OUTPUT']
        return results

    def name(self):
        return 'modelo'

    def displayName(self):
        return 'modelo'

    def group(self):
        return ''

    def groupId(self):
        return ''

    def createInstance(self):
        return Modelo()
This is a custom script used in QGIS.
... to something like this:
def extract_by_expression(input_shape_file_path, output_shape_file_path):
    parameters = {
        'EXPRESSION': '\"S_majority\" = 1',
        'INPUT': QgsProcessingParameterVectorLayer('input', input_shape_file_path, defaultValue=None),
        'OUTPUT': QgsProcessingParameterFeatureSink('output', output_shape_file_path, type=QgsProcessing.TypeVectorAnyGeometry, createByDefault=True, defaultValue=None)
    }
    processing.run('native:extractbyexpression', parameters)
I made this for other algorithms and everything worked fine. But for this specific case, I'm getting this error:
Traceback (most recent call last):
File "/Applications/QGIS.app/Contents/MacOS/../Resources/python/code.py", line 90, in runcode
exec(code, self.locals)
File "<input>", line 1, in <module>
File "<string>", line 467, in <module>
File "<string>", line 460, in extract_by_expression
File "/Applications/QGIS.app/Contents/MacOS/../Resources/python/plugins/processing/tools/general.py", line 108, in run
return Processing.runAlgorithm(algOrName, parameters, onFinish, feedback, context)
File "/Applications/QGIS.app/Contents/MacOS/../Resources/python/plugins/processing/core/Processing.py", line 168, in runAlgorithm
raise QgsProcessingException(msg)
_core.QgsProcessingException: Unable to execute algorithm
Could not load source layer for INPUT: invalid value
The input file is OK; it's being used by other algorithms, so I guess the problem is not with the file itself.
I'm using QGIS 3.18.1-Zürich, and the idea is to be able to write my own scripts instead of doing this manually in the QGIS interface.
Thanks!
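One thing worth checking (a guess on my part, not a confirmed fix): QgsProcessingParameterVectorLayer and QgsProcessingParameterFeatureSink are parameter declarations meant for initAlgorithm inside an algorithm class. When calling processing.run() directly, INPUT and OUTPUT should be actual values such as file paths or layer objects, which would explain "Could not load source layer for INPUT: invalid value". A minimal sketch under that assumption:

import processing


def extract_by_expression(input_shape_file_path, output_shape_file_path):
    # Pass plain values (paths or layer objects), not parameter definitions.
    parameters = {
        'EXPRESSION': '"S_majority" = 1',
        'INPUT': input_shape_file_path,
        'OUTPUT': output_shape_file_path,
    }
    return processing.run('native:extractbyexpression', parameters)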
I am trying to access a network scanner to scan documents. This is the library and the code I am using:
import sane

sane.init()


class saneScanner(object):
    def __init__(self, URI):
        self.URI = URI

    def __enter__(self):
        self.dev = sane.open(self.URI)
        return self.dev

    def __exit__(self, exception_type, exception_value, traceback):
        self.dev.close()
        print(exception_type, exception_value, traceback)


def t():
    with saneScanner("airscan:HP OfficeJet Pro 8020 series [973B68]") as k:
        return k.opt


print(t())
I am ending up with this error:
Traceback (most recent call last):
File "with.py", line 23, in <module>
print(t())
File "/usr/local/lib/python3.7/dist-packages/sane.py", line 86, in __repr__
curValue = repr(getattr(self.scanDev, self.py_name))
File "/usr/local/lib/python3.7/dist-packages/sane.py", line 232, in __getattr__
return d['dev'].get_option(opt.index)
_sane.error: SaneDev object is closed
If I remove the return statement from function t(), no error is thrown. Can you please let me know where I am going wrong?
Thanks in advance
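The error is consistent with the with block closing the device before the returned options are printed: __exit__ calls self.dev.close(), and only afterwards does print() try to repr() the option objects, which still need the (now closed) SaneDev. A minimal sketch of one workaround, assuming you only need the options as text, is to force the string representation while the device is still open:

def t():
    with saneScanner("airscan:HP OfficeJet Pro 8020 series [973B68]") as k:
        # Capture the data as a plain string while the device is open;
        # after the with block exits, the SaneDev is closed and repr()ing
        # the option objects would raise "SaneDev object is closed".
        return str(k.opt)


print(t())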
I am writing a simple server program. There will be inevitable typos and other errors in fresh code, and usually the Python interpreter will print a ValueError/AttributeError traceback and exit. The traceback points to the exact position of the error. Under the Twisted framework, however, these errors are not printed. Take the following example:
from twisted.internet import reactor, protocol, task
#from twisted.internet.defer import setDebugging
#setDebugging(True)


class MyProtocol(protocol.Protocol):
    def dataReceived(self, data):
        try:
            set_position(int(data))
        except ValueError:
            pass

    def connectionMade(self):
        self.factory.clientConnectionMade(self)

    def connectionLost(self, reason):
        self.factory.clientConnectionLost(self)


class MyFactory(protocol.Factory):
    protocol = MyProtocol

    def __init__(self):
        self.clients = []
        self.lc = task.LoopingCall(self.announce)
        self.lc.start(1)

    def announce(self):
        pos = A_GREAT_TYPO_HERE()
        for client in self.clients:
            client.transport.write("Position is {0}\n".format(pos).encode('utf-8'))

    def clientConnectionMade(self, client):
        self.clients.append(client)

    def clientConnectionLost(self, client):
        self.clients.remove(client)


def get_position():
    return position[0]


def set_position(pos):
    position[0] = pos


def main():
    global position
    position = [0]
    myfactory = MyFactory()
    reactor.listenTCP(5362, myfactory)
    reactor.run()


if __name__ == "__main__":
    main()
A_GREAT_TYPO_HERE() in MyFactory.announce is meant to be get_position(), but it is a typo.
When the server is run, the terminal only outputs
Unhandled error in Deferred:
and nothing else. Even if I enable Deferred debugging (uncomment the 2nd and 3rd lines), the terminal outputs:
Unhandled error in Deferred:
(debug: C: Deferred was created:
C: File "nodes/test.py", line 48, in <module>
C: main()
C: File "nodes/test.py", line 43, in main
C: myfactory = MyFactory()
C: File "nodes/test.py", line 21, in __init__
C: self.lc.start(1)
C: File "/home/sgsdxzy/anaconda3/lib/python3.6/site-packages/twisted/internet/task.py", line 189, in start
C: deferred = self._deferred = defer.Deferred()
I: First Invoker was:
I: File "nodes/test.py", line 48, in <module>
I: main()
I: File "nodes/test.py", line 43, in main
I: myfactory = MyFactory()
I: File "nodes/test.py", line 21, in __init__
I: self.lc.start(1)
I: File "/home/sgsdxzy/anaconda3/lib/python3.6/site-packages/twisted/internet/task.py", line 194, in start
I: self()
I: File "/home/sgsdxzy/anaconda3/lib/python3.6/site-packages/twisted/internet/task.py", line 241, in __call__
I: d.addErrback(eb)
I: File "/home/sgsdxzy/anaconda3/lib/python3.6/site-packages/twisted/internet/defer.py", line 332, in addErrback
I: errbackKeywords=kw)
I: File "/home/sgsdxzy/anaconda3/lib/python3.6/site-packages/twisted/internet/defer.py", line 310, in addCallbacks
I: self._runCallbacks()
I: File "/home/sgsdxzy/anaconda3/lib/python3.6/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks
I: current.result = callback(current.result, *args, **kw)
I: File "/home/sgsdxzy/anaconda3/lib/python3.6/site-packages/twisted/internet/task.py", line 236, in eb
I: d.errback(failure)
)
It points to the error only as closely as self.lc.start(1), not A_GREAT_TYPO_HERE(). How can I debug my program so tracebacks point to the actual errors?
The "C" and "I" lines you're seeing are due to the fact that you've enabled Deferred debugging. The "C" lines give you the stack where the Deferred was created. The "I" lines give you the stack where the Deferred was "invoked" (its callback or errback method was called).
Neither of those is what you're looking for, it seems. If you want to see the stack associated with the Failure the Deferred has been fired with, the most straightforward solution is to make sure the Failure gets logged (and that you have a log observer so that you can actually see that log event).
You should add this to your main:
from sys import stdout
from twisted.logger import globalLogBeginner, textFileLogObserver
globalLogBeginner.beginLoggingTo([textFileLogObserver(stdout)])
This directs the log stream to stdout as text. It is most likely sufficient to get you the information you want. However, to be really safe, you also want to explicitly log failures instead of relying on the garbage collector to do it for you. So you also want to change:
self.lc.start(1)
To:
# Module scope
from twisted.logger import Logger
logger = Logger()
...
# in __init__
d = self.lc.start(1)
d.addErrback(lambda f: logger.failure("Loop thing problem", f))
(Also you may want to consider taking this code out of __init__ and putting it in startFactory instead; also consider not using a global reactor but instead pass it around as a parameter.)
This will give you output like:
2017-04-25T06:53:14-0400 [__main__.MyFactory#critical] Foo
Traceback (most recent call last):
File "debugging2.py", line 52, in main
myfactory = MyFactory()
File "debugging2.py", line 28, in __init__
d = self.lc.start(1)
File "/tmp/debugging/local/lib/python2.7/site-packages/twisted/internet/task.py", line 194, in start
self()
File "/tmp/debugging/local/lib/python2.7/site-packages/twisted/internet/task.py", line 239, in __call__
d = defer.maybeDeferred(self.f, *self.a, **self.kw)
--- <exception caught here> ---
File "/tmp/debugging/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 150, in maybeDeferred
result = f(*args, **kw)
File "debugging2.py", line 32, in announce
pos = A_GREAT_TYPO_HERE()
exceptions.NameError: global name 'A_GREAT_TYPO_HERE' is not defined
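Expanding on the parenthetical suggestion above, here is a hedged sketch of what moving the loop start into startFactory and passing the reactor in as a parameter might look like (my restructuring, not code from the original answer; MyProtocol, get_position and the client list handling are as in the question):

from twisted.internet import protocol, task
from twisted.logger import Logger


class MyFactory(protocol.Factory):
    protocol = MyProtocol  # the protocol class from the question's code
    log = Logger()

    def __init__(self, reactor):
        self._reactor = reactor  # injected instead of importing the global reactor
        self.clients = []
        self.lc = task.LoopingCall(self.announce)
        self.lc.clock = self._reactor  # run the loop on the injected reactor

    def startFactory(self):
        # Starting the LoopingCall here rather than in __init__ means it only
        # runs once the factory is in use, and its failures are logged
        # explicitly instead of being left to the garbage collector.
        d = self.lc.start(1)
        d.addErrback(lambda f: self.log.failure("Loop thing problem", f))

    def announce(self):
        pos = get_position()  # the corrected call (was A_GREAT_TYPO_HERE())
        for client in self.clients:
            client.transport.write("Position is {0}\n".format(pos).encode('utf-8'))

In main() you would then build the factory with MyFactory(reactor) before calling reactor.listenTCP(5362, myfactory).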
I'm using Scrapy with a BloomFilter, and after 10 minutes I get this error in a loop:
2016-10-03 18:03:34 [twisted] CRITICAL:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/twisted/internet/task.py", line 517, in _oneWorkUnit
result = next(self._iterator)
File "/usr/local/lib/python2.7/dist-packages/scrapy/utils/defer.py", line 63, in <genexpr>
work = (callable(elem, *args, **named) for elem in iterable)
File "/usr/local/lib/python2.7/dist-packages/scrapy/core/scraper.py", line 183, in _process_spidermw_output
self.crawler.engine.crawl(request=output, spider=spider)
File "/usr/local/lib/python2.7/dist-packages/scrapy/core/engine.py", line 209, in crawl
self.schedule(request, spider)
File "/usr/local/lib/python2.7/dist-packages/scrapy/core/engine.py", line 215, in schedule
if not self.slot.scheduler.enqueue_request(request):
File "/usr/local/lib/python2.7/dist-packages/scrapy/core/scheduler.py", line 54, in enqueue_request
if not request.dont_filter and self.df.request_seen(request):
File "dirbot/custom_filters.py", line 20, in request_seen
self.fingerprints.add(fp)
File "/usr/local/lib/python2.7/dist-packages/pybloom/pybloom.py", line 182, in add
raise IndexError("BloomFilter is at capacity")
IndexError: BloomFilter is at capacity
The filter.py:
from pybloom import BloomFilter
from scrapy.utils.job import job_dir
from scrapy.dupefilters import BaseDupeFilter


class BLOOMDupeFilter(BaseDupeFilter):
    """Request Fingerprint duplicates filter"""

    def __init__(self, path=None):
        self.file = None
        self.fingerprints = BloomFilter(2000000, 0.00001)

    @classmethod
    def from_settings(cls, settings):
        return cls(job_dir(settings))

    def request_seen(self, request):
        fp = request.url
        if fp in self.fingerprints:
            return True
        self.fingerprints.add(fp)

    def close(self, reason):
        self.fingerprints = None
I searched Google for every possibility, but nothing works.
Thanks for your help.
Use pybloom.ScalableBloomFilter instead of BloomFilter.
from pybloom import ScalableBloomFilter
from scrapy.utils.job import job_dir
from scrapy.dupefilters import BaseDupeFilter


class BLOOMDupeFilter(BaseDupeFilter):
    """Request Fingerprint duplicates filter"""

    def __init__(self,
                 path=None,
                 initial_capacity=2000000,
                 error_rate=0.00001,
                 mode=ScalableBloomFilter.SMALL_SET_GROWTH):
        self.file = None
        self.fingerprints = ScalableBloomFilter(
            initial_capacity, error_rate, mode)
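ScalableBloomFilter adds new internal filters as the current one fills up, so it keeps growing with the crawl instead of raising the "BloomFilter is at capacity" IndexError; the rest of the class (from_settings, request_seen, close) can stay the same as in the original filter.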