Plone hangs on Python's waiter.acquire() and uses 100% CPU

We are experiencing some bottlenecking in our Plone instance when content managers perform actions such as Check out, Edit, and Workflow transitions. So far we have not been able to isolate anything specific, and it happens intermittently.
For my own testing I stuck to the "Check out" action, using Products.LongRequestLogger for troubleshooting. I am seeing the action take anywhere from 8 to 78 seconds, both in the logged results and in the user experience. Here is the traceback that is repeated in the LongRequestLogger output:
Traceback:
File "/var/db/zope/plone43/buildout-cache/eggs/Zope2-2.13.21-py2.7.egg/ZServer/PubCore/ZServerPublisher.py", line 31, in __init__
response=b)
File "/var/db/zope/plone43/buildout-cache/eggs/Zope2-2.13.21-py2.7.egg/ZPublisher/Publish.py", line 455, in publish_module
environ, debug, request, response)
File "/var/db/zope/plone43/buildout-cache/eggs/Products.LongRequestLogger-1.1.0-py2.7.egg/Products/LongRequestLogger/patch.py", line 16, in wrapper
result = wrapper.original(*args, **kw)
File "/var/db/zope/plone43/buildout-cache/eggs/Zope2-2.13.21-py2.7.egg/ZPublisher/Publish.py", line 249, in publish_module_standard
response = publish(request, module_name, after_list, debug=debug)
File "/var/db/zope/plone43/buildout-cache/eggs/Zope2-2.13.21-py2.7.egg/ZPublisher/Publish.py", line 138, in publish
request, bind=1)
File "/var/db/zope/plone43/buildout-cache/eggs/Zope2-2.13.21-py2.7.egg/ZPublisher/mapply.py", line 77, in mapply
if debug is not None: return debug(object,args,context)
File "/var/db/zope/plone43/buildout-cache/eggs/Zope2-2.13.21-py2.7.egg/ZPublisher/Publish.py", line 48, in call_object
result=apply(object,args) # Type s<cr> to step into published object.
File "/var/db/zope/plone43/zeocluster/src/plone.app.iterate/plone/app/iterate/browser/checkout.py", line 77, in __call__
wc = policy.checkout(locator())
File "/var/db/zope/plone43/zeocluster/src/plone.app.iterate/plone/app/iterate/policy.py", line 65, in checkout
working_copy, relation = copier.copyTo( container )
File "/var/db/zope/plone43/zeocluster/src/plone.app.iterate/plone/app/iterate/copier.py", line 54, in copyTo
wc = self._copyBaseline( container )
File "/var/db/zope/plone43/zeocluster/src/plone.app.iterate/plone/app/iterate/copier.py", line 211, in _copyBaseline
result = container.manage_pasteObjects( clipboard )
File "/var/db/zope/plone43/buildout-cache/eggs/Zope2-2.13.21-py2.7.egg/OFS/CopySupport.py", line 235, in manage_pasteObjects
self._setObject(id, ob)
File "/var/db/zope/plone43/buildout-cache/eggs/Products.BTreeFolder2-2.13.3-py2.7.egg/Products/BTreeFolder2/BTreeFolder2.py", line 455, in _setObject
notify(ObjectAddedEvent(ob, self, id))
File "/var/db/zope/plone43/buildout-cache/eggs/zope.event-3.5.2-py2.7.egg/zope/event/__init__.py", line 31, in notify
subscriber(event)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.component-3.9.5-py2.7.egg/zope/component/event.py", line 24, in dispatch
zope.component.subscribers(event, None)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.component-3.9.5-py2.7.egg/zope/component/_api.py", line 136, in subscribers
return sitemanager.subscribers(objects, interface)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.component-3.9.5-py2.7.egg/zope/component/registry.py", line 321, in subscribers
return self.adapters.subscribers(objects, provided)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.interface-4.1.0-py2.7-linux-x86_64.egg/zope/interface/adapter.py", line 601, in subscribers
subscription(*objects)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.component-3.9.5-py2.7.egg/zope/component/event.py", line 32, in objectEventNotify
zope.component.subscribers((event.object, event), None)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.component-3.9.5-py2.7.egg/zope/component/_api.py", line 136, in subscribers
return sitemanager.subscribers(objects, interface)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.component-3.9.5-py2.7.egg/zope/component/registry.py", line 321, in subscribers
return self.adapters.subscribers(objects, provided)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.interface-4.1.0-py2.7-linux-x86_64.egg/zope/interface/adapter.py", line 601, in subscribers
subscription(*objects)
File "/var/db/zope/plone43/buildout-cache/eggs/Zope2-2.13.21-py2.7.egg/OFS/subscribers.py", line 110, in dispatchObjectMovedEvent
callManageAfterAdd(ob, event.object, event.newParent)
File "/var/db/zope/plone43/buildout-cache/eggs/Zope2-2.13.21-py2.7.egg/OFS/subscribers.py", line 143, in callManageAfterAdd
ob.manage_afterAdd(item, container)
File "/var/db/zope/plone43/buildout-cache/eggs/Products.ATContentTypes-2.1.13-py2.7.egg/Products/ATContentTypes/content/document.py", line 196, in manage_afterAdd
ATCTContent.manage_afterAdd(self, item, container)
File "/var/db/zope/plone43/buildout-cache/eggs/Products.Archetypes-1.9.4-py2.7.egg/Products/Archetypes/BaseContent.py", line 43, in manage_afterAdd
BaseObject.manage_afterAdd(self, item, container)
File "/var/db/zope/plone43/buildout-cache/eggs/Products.Archetypes-1.9.4-py2.7.egg/Products/Archetypes/BaseObject.py", line 158, in manage_afterAdd
Referenceable.manage_afterAdd(self, item, container)
File "/var/db/zope/plone43/buildout-cache/eggs/Products.Archetypes-1.9.4-py2.7.egg/Products/Archetypes/Referenceable.py", line 245, in manage_afterAdd
r.set(self, rrefs[r.getName()])
File "/var/db/zope/plone43/buildout-cache/eggs/Products.Archetypes-1.9.4-py2.7.egg/Products/Archetypes/Field.py", line 1987, in set
tool.addReference(instance, uid, self.relationship, **addRef_kw)
File "/var/db/zope/plone43/buildout-cache/eggs/Products.Archetypes-1.9.4-py2.7.egg/Products/Archetypes/ReferenceEngine.py", line 340, in addReference
annotation._setObject(rID, referenceObject)
File "/var/db/zope/plone43/buildout-cache/eggs/Zope2-2.13.21-py2.7.egg/OFS/ObjectManager.py", line 359, in _setObject
notify(ObjectAddedEvent(ob, self, id))
File "/var/db/zope/plone43/buildout-cache/eggs/zope.event-3.5.2-py2.7.egg/zope/event/__init__.py", line 31, in notify
subscriber(event)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.component-3.9.5-py2.7.egg/zope/component/event.py", line 24, in dispatch
zope.component.subscribers(event, None)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.component-3.9.5-py2.7.egg/zope/component/_api.py", line 136, in subscribers
return sitemanager.subscribers(objects, interface)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.component-3.9.5-py2.7.egg/zope/component/registry.py", line 321, in subscribers
return self.adapters.subscribers(objects, provided)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.interface-4.1.0-py2.7-linux-x86_64.egg/zope/interface/adapter.py", line 601, in subscribers
subscription(*objects)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.component-3.9.5-py2.7.egg/zope/component/event.py", line 32, in objectEventNotify
zope.component.subscribers((event.object, event), None)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.component-3.9.5-py2.7.egg/zope/component/_api.py", line 136, in subscribers
return sitemanager.subscribers(objects, interface)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.component-3.9.5-py2.7.egg/zope/component/registry.py", line 321, in subscribers
return self.adapters.subscribers(objects, provided)
File "/var/db/zope/plone43/buildout-cache/eggs/zope.interface-4.1.0-py2.7-linux-x86_64.egg/zope/interface/adapter.py", line 601, in subscribers
subscription(*objects)
File "/var/db/zope/plone43/buildout-cache/eggs/Zope2-2.13.21-py2.7.egg/OFS/subscribers.py", line 110, in dispatchObjectMovedEvent
callManageAfterAdd(ob, event.object, event.newParent)
File "/var/db/zope/plone43/buildout-cache/eggs/Zope2-2.13.21-py2.7.egg/OFS/subscribers.py", line 143, in callManageAfterAdd
ob.manage_afterAdd(item, container)
File "/var/db/zope/plone43/buildout-cache/eggs/Products.Archetypes-1.9.4-py2.7.egg/Products/Archetypes/ReferenceEngine.py", line 145, in manage_afterAdd
rc.catalog_object(self, url)
File "/var/db/zope/plone43/buildout-cache/eggs/Products.Archetypes-1.9.4-py2.7.egg/Products/Archetypes/UIDCatalog.py", line 189, in catalog_object
ZCatalog.catalog_object(self, obj, uid, **kwargs)
File "/var/db/zope/plone43/buildout-cache/eggs/Products.ZCatalog-2.13.23-py2.7.egg/Products/ZCatalog/ZCatalog.py", line 476, in catalog_object
update_metadata=update_metadata)
File "/var/db/zope/plone43/buildout-cache/eggs/Products.ZCatalog-2.13.23-py2.7.egg/Products/ZCatalog/Catalog.py", line 320, in catalogObject
index = self.updateMetadata(object, uid, None)
File "/var/db/zope/plone43/buildout-cache/eggs/Products.ZCatalog-2.13.23-py2.7.egg/Products/ZCatalog/Catalog.py", line 276, in updateMetadata
while not data.insert(index, newDataRecord):
File "/var/db/zope/plone43/buildout-cache/eggs/ZODB3-3.10.5-py2.7-linux-x86_64.egg/ZODB/Connection.py", line 860, in setstate
self._setstate(obj)
File "/var/db/zope/plone43/buildout-cache/eggs/ZODB3-3.10.5-py2.7-linux-x86_64.egg/ZODB/Connection.py", line 901, in _setstate
p, serial = self._storage.load(obj._p_oid, '')
File "/var/db/zope/plone43/buildout-cache/eggs/ZODB3-3.10.5-py2.7-linux-x86_64.egg/ZODB/Connection.py", line 1270, in load
return self._storage.load(oid, '')
File "/var/db/zope/plone43/buildout-cache/eggs/ZODB3-3.10.5-py2.7-linux-x86_64.egg/ZEO/ClientStorage.py", line 833, in load
data, tid = self._server.loadEx(oid)
File "/var/db/zope/plone43/buildout-cache/eggs/ZODB3-3.10.5-py2.7-linux-x86_64.egg/ZEO/ServerStub.py", line 176, in loadEx
return self.rpc.call("loadEx", oid)
File "/var/db/zope/plone43/buildout-cache/eggs/ZODB3-3.10.5-py2.7-linux-x86_64.egg/ZEO/zrpc/connection.py", line 763, in call
r_args = self.wait(msgid)
File "/var/db/zope/plone43/buildout-cache/eggs/ZODB3-3.10.5-py2.7-linux-x86_64.egg/ZEO/zrpc/connection.py", line 791, in wait
self.replies_cond.wait()
File "/var/db/zope/plone43/Python-2.7/lib/python2.7/threading.py", line 339, in wait
waiter.acquire()
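For reference, the dump above comes from Products.LongRequestLogger, which is enabled through environment variables in zope.conf (variable names per the product's README; the path and thresholds below are illustrative for our layout):
<environment>
  # dump the stack of any request running longer than 2s, repeating every 1s
  longrequestlogger_file /var/db/zope/plone43/zeocluster/var/log/longrequest.log
  longrequestlogger_timeout 2
  longrequestlogger_interval 1
</environment>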
During this time I notice that the client handling the request maxes out at 100% CPU usage, which slows the site to a crawl if not a complete halt. I have watched this in our production environment, which has anywhere from 5 to 40 simultaneous users at any given time. I then tested this on a local install of our site, and even with just one user I see the same results.
We are running Plone 4.3.2 on a RedHat 5.10 (Tikanga) 64-bit box. My local VM is running CentOS 5.10 with the same Plone version. Our production environment runs across two servers: one dedicated to the ZEO server, and the second running four ZEO clients.
I can't figure out why it is hanging here and taking up so many resources. Any suggestions or solutions would be much appreciated!
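Since the traceback bottoms out in a ZEO loadEx call waiting on the storage server, one knob I am considering is the ZEO client cache, which reduces round-trips to the ZEO server. A sketch for plone.recipe.zope2instance in buildout.cfg (the part name, the ${zeoserver:zeo-address} reference, and the 512MB figure are illustrative, not a recommendation):
[client1]
recipe = plone.recipe.zope2instance
zeo-client = on
zeo-address = ${zeoserver:zeo-address}
# persistent per-client cache of recently loaded objects
zeo-client-cache-size = 512MB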
UPDATE
We recently upgraded from Plone 4.2 to Plone 4.3, and it seems this issue either started at that time or at least got significantly worse. We do use our own custom theme and content types, but they were based on ATContentTypes, and we made the updates needed for them to work with Plone 4.3.
EDIT
I changed the ZEO server logging level to DEBUG, and this was all I got:
2014-05-28T14:38:35 (unconnected) disconnected
2014-05-28T14:38:44 new transaction
2014-05-28T14:38:44 (192.168.56.102:54486) ('1') lock: transactions waiting: 0
2014-05-28T14:38:44 (192.168.56.102:54486) Preparing to commit transaction: 2 objects, 763 bytes
2014-05-28T14:38:44 new transaction
2014-05-28T14:38:44 (192.168.56.102:54486) ('1') lock: transactions waiting: 0
2014-05-28T14:38:44 (192.168.56.102:54486) Preparing to commit transaction: 1 objects, 123 bytes
2014-05-28T14:39:22 new transaction
2014-05-28T14:39:22 (192.168.56.102:54474) ('1') lock: transactions waiting: 0
2014-05-28T14:39:22 (192.168.56.102:54474) Preparing to commit transaction: 885 objects, 458894 bytes
2014-05-28T14:40:44 new transaction
2014-05-28T14:40:44 (192.168.56.102:54486) ('1') lock: transactions waiting: 0
2014-05-28T14:40:44 (192.168.56.102:54486) Preparing to commit transaction: 1 objects, 123 bytes
According to Client1's LongRequest.log, the action (Check out) started at 14:39:07 and ended at 14:39:28 (this was one of the faster response times).
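For reference, the DEBUG level was set in the eventlog section of zeo.conf (a sketch; the path is illustrative for our layout):
<eventlog>
  level debug
  <logfile>
    path /var/db/zope/plone43/zeocluster/var/log/zeoserver.log
  </logfile>
</eventlog>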

Related

Middleware doesn't solve ExtraDataLengthError on the Binance test provider, resulting in "unknown account"

It's a specific error in the context of Binance; here is the code if you want to test it (the key, ABI and addresses are placeholders):
import web3
HTTPProvider = 'https://data-seed-prebsc-1-s1.binance.org:8545/'
w3 = web3.Web3(web3.Web3.HTTPProvider(HTTPProvider))
# middleware
# w3.middleware_onion.inject(web3.middleware.geth_poa_middleware, layer=0)  # (change to other error)
# w3.eth.get_block('latest')
my_account = w3.eth.account.privateKeyToAccount('my_private_key_wallet')
abi = 'my_abi'
w3.eth.contract(contract_address1, abi=abi).functions.transfer(wallet_address2, 1).transact({'from': my_account._address})
If run without the middleware:
File "/home/ubuntu/env/lib/python3.8/site-packages/web3/contract.py", line 997, in transact
return transact_with_contract_function(
File "/home/ubuntu/env/lib/python3.8/site-packages/web3/contract.py", line 1590, in transact_with_contract_function
txn_hash = web3.eth.send_transaction(transact_transaction)
...
File "/home/ubuntu/env/lib/python3.8/site-packages/eth_utils/applicators.py", line 84, in apply_formatters_to_dict
yield key, formatters[key](item)
File "/home/ubuntu/env/lib/python3.8/site-packages/web3/middleware/validation.py", line 71, in check_extradata_length
raise ExtraDataLengthError(
web3.exceptions.ExtraDataLengthError: The field extraData is 97 bytes, but should be 32. It is quite likely that you are connected to a POA chain. Refer to http://web3py.readthedocs.io/en/stable/middleware.html#geth-style-proof-of-authority for more details. The full extraData is: HexBytes('0x...')
If run with the middleware solution (as explained in other issues around the internet):
File "/home/ubuntu/env/lib/python3.8/site-packages/web3/contract.py", line 997, in transact
return transact_with_contract_function(
File "/home/ubuntu/env/lib/python3.8/site-packages/web3/contract.py", line 1590, in transact_with_contract_function
txn_hash = web3.eth.send_transaction(transact_transaction)
File "/home/ubuntu/env/lib/python3.8/site-packages/web3/eth.py", line 686, in send_transaction
return self._send_transaction(transaction)
File "/home/ubuntu/env/lib/python3.8/site-packages/web3/module.py", line 57, in caller
result = w3.manager.request_blocking(method_str,
File "/home/ubuntu/env/lib/python3.8/site-packages/web3/manager.py", line 187, in request_blocking
return self.formatted_response(response,
File "/home/ubuntu/env/lib/python3.8/site-packages/web3/manager.py", line 168, in formatted_response
raise ValueError(response["error"])
ValueError: {'code': -32000, 'message': 'unknown account'}
Wallet functions (like balance) and contract functions (like totalSupply) work in web3.py.
Sending tokens via MetaMask works, but I need to send tokens via web3.py.
I use buildTransaction, sign_transaction and send_raw_transaction, with the key in raw form (0x...).
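For reference, this is the shape of the local-signing flow (a sketch assuming web3.py 5.x; contract_address, recipient, abi and the key are placeholders). Because the transaction is signed locally and broadcast with send_raw_transaction, the node never has to manage the account, which is what the "unknown account" error from transact() complains about:
import web3
from web3.middleware import geth_poa_middleware

w3 = web3.Web3(web3.Web3.HTTPProvider('https://data-seed-prebsc-1-s1.binance.org:8545/'))
w3.middleware_onion.inject(geth_poa_middleware, layer=0)  # BSC testnet is a POA chain

account = w3.eth.account.privateKeyToAccount('0x...')        # placeholder key
token = w3.eth.contract(address=contract_address, abi=abi)   # placeholders

# Build the transaction, sign it locally, then broadcast the raw bytes.
tx = token.functions.transfer(recipient, 1).buildTransaction({
    'from': account.address,
    'nonce': w3.eth.get_transaction_count(account.address),
    'gas': 100000,
    'gasPrice': w3.eth.gas_price,
})
signed = account.sign_transaction(tx)
tx_hash = w3.eth.send_raw_transaction(signed.rawTransaction)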

Celery task raises an error when the Redis backend connection socket times out

I am using Celery with a Redis broker to run tasks one by one, but when I run two tasks, Redis raises a socket timeout error for the second task after the first one completes, so the second task fails.
File "/home/ubuntu/.virtualenvs/aide_venv/local/lib/python2.7/site-packages/celery/result.py", line 194, in get
on_message=on_message,
File "/home/ubuntu/.virtualenvs/aide_venv/local/lib/python2.7/site-packages/celery/backends/async.py", line 189, in wait_for_pending
for _ in self._wait_for_pending(result, **kwargs):
File "/home/ubuntu/.virtualenvs/aide_venv/local/lib/python2.7/site-packages/celery/backends/async.py", line 256, in _wait_for_pending
on_interval=on_interval):
File "/home/ubuntu/.virtualenvs/aide_venv/local/lib/python2.7/site-packages/celery/backends/async.py", line 57, in drain_events_until
yield self.wait_for(p, wait, timeout=1)
File "/home/ubuntu/.virtualenvs/aide_venv/local/lib/python2.7/site-packages/celery/backends/async.py", line 66, in wait_for
wait(timeout=timeout)
File "/home/ubuntu/.virtualenvs/aide_venv/local/lib/python2.7/site-packages/celery/backends/redis.py", line 69, in drain_events
m = self._pubsub.get_message(timeout=timeout)
File "/home/ubuntu/.virtualenvs/aide_venv/local/lib/python2.7/site-packages/redis/client.py", line 2513, in get_message
response = self.parse_response(block=False, timeout=timeout)
File "/home/ubuntu/.virtualenvs/aide_venv/local/lib/python2.7/site-packages/redis/client.py", line 2430, in parse_response
return self._execute(connection, connection.read_response)
File "/home/ubuntu/.virtualenvs/aide_venv/local/lib/python2.7/site-packages/redis/client.py", line 2408, in _execute
return command(*args)
File "/home/ubuntu/.virtualenvs/aide_venv/local/lib/python2.7/site-packages/redis/connection.py", line 624, in read_response
response = self._parser.read_response()
File "/home/ubuntu/.virtualenvs/aide_venv/local/lib/python2.7/site-packages/redis/connection.py", line 284, in read_response
response = self._buffer.readline()
File "/home/ubuntu/.virtualenvs/aide_venv/local/lib/python2.7/site-packages/redis/connection.py", line 216, in readline
self._read_from_socket()
File "/home/ubuntu/.virtualenvs/aide_venv/local/lib/python2.7/site-packages/redis/connection.py", line 187, in _read_from_socket
raise TimeoutError("Timeout reading from socket")
TimeoutError: Timeout reading from socket
I am running Celery with this command:
celery -A flask_application.celery worker --loglevel=info --max-tasks-per-child=1 --concurrency=1
I am calling the Celery task with the .delay() function:
celery_response = run_algo.run_pipeline.delay(request.get_json())
And getting the output with the .get() function:
output_file_path = celery_response.get()
There's a warning in the docs for AsyncResult.get saying that calling it within an async task can cause a deadlock, which may be what's happening here, though it's hard to tell without more context about where these things are being called.
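If the socket timeout itself (rather than a deadlock) is what kills the second task, one knob worth trying is Celery's redis_socket_timeout setting; and if .get() ever gets called from inside a task, chaining avoids the documented deadlock. A sketch (handle_output is a hypothetical follow-up task; the timeout value is illustrative, in seconds):
# raise the Redis result-backend socket timeout on the Flask-bound app
celery.conf.redis_socket_timeout = 300

# instead of blocking on .get(), chain a follow-up task onto the result
from celery import chain
chain(run_algo.run_pipeline.s(request.get_json()), handle_output.s()).apply_async()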

Celery upgrade (3.1->4.1) - Connection reset by peer

We have been working with Celery for the last year, with ~15 workers, each defined with a concurrency between 1 and 4.
Recently we upgraded our Celery from v3.1 to v4.1.
Now we are getting the following error in each of the workers' logs; any ideas what can cause such an error?
2017-08-21 18:33:19,780 94794 ERROR Control command error: error(104, 'Connection reset by peer') [file: pidbox.py, line: 46]
Traceback (most recent call last):
File "/srv/dy/venv/lib/python2.7/site-packages/celery/worker/pidbox.py", line 42, in on_message
self.node.handle_message(body, message)
File "/srv/dy/venv/lib/python2.7/site-packages/kombu/pidbox.py", line 129, in handle_message
return self.dispatch(**body)
File "/srv/dy/venv/lib/python2.7/site-packages/kombu/pidbox.py", line 112, in dispatch
ticket=ticket)
File "/srv/dy/venv/lib/python2.7/site-packages/kombu/pidbox.py", line 135, in reply
serializer=self.mailbox.serializer)
File "/srv/dy/venv/lib/python2.7/site-packages/kombu/pidbox.py", line 265, in _publish_reply
**opts
File "/srv/dy/venv/lib/python2.7/site-packages/kombu/messaging.py", line 181, in publish
exchange_name, declare,
File "/srv/dy/venv/lib/python2.7/site-packages/kombu/messaging.py", line 203, in _publish
mandatory=mandatory, immediate=immediate,
File "/srv/dy/venv/lib/python2.7/site-packages/amqp/channel.py", line 1748, in _basic_publish
(0, exchange, routing_key, mandatory, immediate), msg
File "/srv/dy/venv/lib/python2.7/site-packages/amqp/abstract_channel.py", line 64, in send_method
conn.frame_writer(1, self.channel_id, sig, args, content)
File "/srv/dy/venv/lib/python2.7/site-packages/amqp/method_framing.py", line 178, in write_frame
write(view[:offset])
File "/srv/dy/venv/lib/python2.7/site-packages/amqp/transport.py", line 272, in write
self._write(s)
File "/usr/lib64/python2.7/socket.py", line 224, in meth
return getattr(self._sock,name)(*args)
error: [Errno 104] Connection reset by peer
BTW: our tasks are of the form:
@app.task(name='EXAMPLE_TASK',
          bind=True,
          base=ConnectionHolderTask)
def example_task(self, arg1, arg2, **kwargs):
    # task code
We are also having massive issues with Celery... I spend 20% of my time just dancing around weird idle-hang/crash issues with our workers, sigh.
We had a similar case that was caused by high concurrency combined with a high worker_prefetch_multiplier; as it turns out, fetching thousands of tasks is a good way to frack the connection.
If that's not the case: try disabling the broker pool by setting broker_pool_limit to None.
Just some quick ideas that might (hopefully) help :-)
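Concretely, with Celery 4's lowercase settings those two ideas look like this (values illustrative):
# celeryconfig.py
worker_prefetch_multiplier = 1   # don't prefetch big batches of tasks per worker
broker_pool_limit = None         # disable the broker connection pool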

run mrjob on Amazon EMR, t2.micro not supported

I tried to run an mrjob script on Amazon EMR. It worked well when I used a c1.medium instance; however, it raised an error when I changed the instance type to t2.micro. The full error message is shown below.
C:\Users\Administrator\MyIpython>python word_count.py -r emr 111.txt
using configs in C:\Users\Administrator\.mrjob.conf
creating new scratch bucket mrjob-875a948553aab9e8
using s3://mrjob-875a948553aab9e8/tmp/ as our scratch dir on S3
creating tmp directory c:\users\admini~1\appdata\local\temp\word_count.Administrator.20150731.013007.592000
writing master bootstrap script to c:\users\admini~1\appdata\local\temp\word_count.Administrator.20150731.013007.592000\b.py
PLEASE NOTE: Starting in mrjob v0.5.0, protocols will be strict by default. It's recommended you run your job with --strict-protocols or set up mrjob.conf as described at https://pythonhosted.org/mrjob/whats-new.html#ready-for-strict-protocols
creating S3 bucket 'mrjob-875a948553aab9e8' to use as scratch space
Copying non-input files into s3://mrjob-875a948553aab9e8/tmp/word_count.Administrator.20150731.013007.592000/files/
Waiting 5.0s for S3 eventual consistency
Creating Elastic MapReduce job flow
Traceback (most recent call last):
File "word_count.py", line 16, in <module>
MRWordFrequencyCount.run()
File "F:\Program Files\Anaconda\lib\site-packages\mrjob\job.py", line 461, in run
mr_job.execute()
File "F:\Program Files\Anaconda\lib\site-packages\mrjob\job.py", line 479, in execute
super(MRJob, self).execute()
File "F:\Program Files\Anaconda\lib\site-packages\mrjob\launch.py", line 153, in execute
self.run_job()
File "F:\Program Files\Anaconda\lib\site-packages\mrjob\launch.py", line 216, in run_job
runner.run()
File "F:\Program Files\Anaconda\lib\site-packages\mrjob\runner.py", line 470, in run
self._run()
File "F:\Program Files\Anaconda\lib\site-packages\mrjob\emr.py", line 881, in _run
self._launch()
File "F:\Program Files\Anaconda\lib\site-packages\mrjob\emr.py", line 886, in _launch
self._launch_emr_job()
File "F:\Program Files\Anaconda\lib\site-packages\mrjob\emr.py", line 1593, in _launch_emr_job
persistent=False)
File "F:\Program Files\Anaconda\lib\site-packages\mrjob\emr.py", line 1327, in _create_job_flow
self._job_name, self._opts['s3_log_uri'], **args)
File "F:\Program Files\Anaconda\lib\site-packages\mrjob\retry.py", line 149, in call_and_maybe_retry
return f(*args, **kwargs)
File "F:\Program Files\Anaconda\lib\site-packages\mrjob\retry.py", line 71, in call_and_maybe_retry
result = getattr(alternative, name)(*args, **kwargs)
File "F:\Program Files\Anaconda\lib\site-packages\boto\emr\connection.py", line 581, in run_jobflow
'RunJobFlow', params, RunJobFlowResponse, verb='POST')
File "F:\Program Files\Anaconda\lib\site-packages\boto\connection.py", line 1208, in get_object
raise self.ResponseError(response.status, response.reason, body)
boto.exception.EmrResponseError: EmrResponseError: 400 Bad Request
Sender
ValidationError
Instance type 't2.micro' is not supported
c3ee1107-3723-11e5-8d8e-f1011298229d
This is my config file:
runners:
  emr:
    aws_access_key_id: xxxxxxxxxxx
    aws_secret_access_key: xxxxxxxxxxxxx
    aws_region: us-east-1
    ec2_key_pair: EMR
    ec2_key_pair_file: C:\Users\Administrator\EMR.pem
    ssh_tunnel_to_job_tracker: false
    ec2_instance_type: t2.micro
    num_ec2_instances: 2
EMR doesn't support the t2 instance type. If you're worried about money, spot instances are a very cost-effective option: right now m1.xlarge is less than $0.05 per hour, and m1.medium is $0.01 per hour (cheaper than t2.micro anyway). The supported types are listed in the EMR web console (the original answer included a screenshot).
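So the immediate fix is to point ec2_instance_type at a supported type in mrjob.conf, for example (m1.medium per the pricing note above):
runners:
  emr:
    ec2_instance_type: m1.medium   # any EMR-supported type; t2.* is rejected
    num_ec2_instances: 2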

Canopy - getting an Access Denied error

I'm learning Python (from a very low baseline) and recently re-installed Canopy (on a MacBook). It was working fine before.
Now whenever I try to launch the editor I get an Access Denied error.
Can anyone help? Please bear in mind my inexperience.
Thanks
File "/Users/simonthompson/Library/Enthought/Canopy_64bit/System/lib/python2.7/site-packages/envisage/ui/tasks/tasks_application.py", line 205, in create_window
window.add_task(task)
File "/Applications/Canopy.app/appdata/canopy-1.1.0.1371.macosx-x86_64/Canopy.app/Contents/lib/python2.7/site-packages/pyface/tasks/task_window.py", line 187, in add_task
state.dock_panes.append(dock_pane_factory(task=task))
File "build/bdist.macosx-10.5-i386/egg/canopy/plugin/editor_task.py", line 143, in _create_python_pane
File "/Users/simonthompson/Library/Enthought/Canopy_64bit/System/lib/python2.7/site-packages/envisage/application.py", line 371, in get_service
protocol, query, minimize, maximize
File "/Users/simonthompson/Library/Enthought/Canopy_64bit/System/lib/python2.7/site-packages/envisage/service_registry.py", line 78, in get_service
services = self.get_services(protocol, query, minimize, maximize)
File "/Users/simonthompson/Library/Enthought/Canopy_64bit/System/lib/python2.7/site-packages/envisage/service_registry.py", line 115, in get_services
actual_protocol, name, obj, properties, service_id
File "/Users/simonthompson/Library/Enthought/Canopy_64bit/System/lib/python2.7/site-packages/envisage/service_registry.py", line 259, in _resolve_factory
obj = obj(**properties)
File "build/bdist.macosx-10.5-i386/egg/canopy/python_frontend/plugin.py", line 109, in _frontend_manager_service_factory
File "build/bdist.macosx-10.5-i386/egg/canopy/app/running_process_manager.py", line 82, in register_proc
File "build/bdist.macosx-10.5-i386/egg/canopy/app/util.py", line 53, in get_exe_or_cmdline
File "/Users/simonthompson/Library/Enthought/Canopy_64bit/System/lib/python2.7/site-packages/psutil/_common.py", line 80, in get
ret = self.func(instance)
File "/Users/simonthompson/Library/Enthought/Canopy_64bit/System/lib/python2.7/site-packages/psutil/init.py", line 331, in exe
return guess_it(fallback=err)
File "/Users/simonthompson/Library/Enthought/Canopy_64bit/System/lib/python2.7/site-packages/psutil/init.py", line 314, in guess_it
cmdline = self.cmdline
File "/Users/simonthompson/Library/Enthought/Canopy_64bit/System/lib/python2.7/site-packages/psutil/init.py", line 346, in cmdline
return self._platform_impl.get_process_cmdline()
File "/Users/simonthompson/Library/Enthought/Canopy_64bit/System/lib/python2.7/site-packages/psutil/_psosx.py", line 153, in wrapper
raise AccessDenied(self.pid, self._process_name)
AccessDenied: (pid=343)
DEBUG|2013-11-03 21:19:25|QtWarningMsg: QImage::scaled: Image is a null image
Since the supplied information is limited, the answer has to stay general. This is a user-permissions issue: the traceback shows psutil raising AccessDenied because Canopy is trying to inspect a process that your user is not allowed to read. I don't know how you launch the app, but if you open it with root privileges there won't be any problem.
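The failure mode is easy to reproduce with psutil directly; a minimal sketch (using the current psutil API, where cmdline is a method rather than the property shown in the traceback):
import psutil

# Asking for another user's process details raises AccessDenied,
# exactly as in the Canopy traceback above.
for proc in psutil.process_iter():
    try:
        print(proc.pid, proc.cmdline())
    except psutil.AccessDenied:
        print(proc.pid, '<access denied>')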
