This is the sample code I got from the http://snmplabs.com/pysnmp/examples/v3arch/asyncore/agent/cmdrsp/agent-side-mib-implementations.html
here they gave sample program for "Multiple MIB trees under distinct context names".
from pysnmp.entity import engine, config
from pysnmp.entity.rfc3413 import cmdrsp, context
from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.smi import instrum, builder
from pysnmp.proto.api import v2c
# Create SNMP engine
snmpEngine = engine.SnmpEngine()

# Transport setup: UDP over IPv4.
# NOTE(review): binding port 161 normally requires root privileges.
config.addTransport(
    snmpEngine,
    udp.domainName,
    udp.UdpTransport().openServerMode(('127.0.0.1', 161))
)

# SNMPv3/USM setup -- user: usr-md5-none, auth: MD5, priv: NONE
config.addV3User(
    snmpEngine, 'usr-md5-none',
    config.usmHMACMD5AuthProtocol, 'authkey1'
)

# Allow full MIB access (read and write view rooted at mib-2) for the user at VACM
config.addVacmUser(snmpEngine, 3, 'usr-md5-none', 'authNoPriv',
                   (1, 3, 6, 1, 2, 1), (1, 3, 6, 1, 2, 1))

# Create an SNMP context with default ContextEngineId (same as SNMP engine ID)
snmpContext = context.SnmpContext(snmpEngine)

# Create multiple independent trees of MIB managed objects (empty so far)
mibTreeA = instrum.MibInstrumController(builder.MibBuilder())
mibTreeB = instrum.MibInstrumController(builder.MibBuilder())

# Register MIB trees at distinct SNMP Context names
snmpContext.registerContextName(v2c.OctetString('context-a'), mibTreeA)
snmpContext.registerContextName(v2c.OctetString('context-b'), mibTreeB)

# Register SNMP Applications at the SNMP engine for particular SNMP context
cmdrsp.GetCommandResponder(snmpEngine, snmpContext)
cmdrsp.SetCommandResponder(snmpEngine, snmpContext)
cmdrsp.NextCommandResponder(snmpEngine, snmpContext)
cmdrsp.BulkCommandResponder(snmpEngine, snmpContext)

# Register an imaginary never-ending job to keep I/O dispatcher running forever
snmpEngine.transportDispatcher.jobStarted(1)

# Run I/O dispatcher which would receive queries and send responses
try:
    snmpEngine.transportDispatcher.runDispatcher()
except:  # noqa: E722 -- deliberately broad: close transports on ANY exit (incl. Ctrl-C), then re-raise
    snmpEngine.transportDispatcher.closeDispatcher()
    raise
I tried it with the following snmpwalk command:
snmpwalk -v3 -u usr-md5-none -l authNoPriv -A authkey1 -n context-a 127.0.0.1 .1.3.6
and I am getting
SNMPv2-SMI::dod = No more variables left in this MIB View (It is past the end of the MIB tree)
I understood this is happening because the MIB trees are empty. But how to add my data to it?
To generically add or override a functioning MIB tree's OID, you can add the code below to the code example you provided:
...
import datetime  # needed by MyStaticMibScalarInstance.getValue (missing from the original snippet)

mibBuilder = snmpContext.getMibInstrum().getMibBuilder()

MibScalar, MibScalarInstance = mibBuilder.importSymbols(
    'SNMPv2-SMI', 'MibScalar', 'MibScalarInstance'
)


class MyStaticMibScalarInstance(MibScalarInstance):
    """Scalar instance whose value is recomputed on every GET request."""

    def getValue(self, name, idx):
        # Compute the value at request time so each query sees a fresh timestamp.
        currentDT = datetime.datetime.now()
        return self.getSyntax().clone(
            'Hello World! It\'s currently: ' + str(currentDT)
        )


# Publish the scalar (1.3.6.1.2.1.1.1, i.e. sysDescr) and its .0 instance
# under a private module name so it overrides the stock implementation.
mibBuilder.exportSymbols(
    '__MY_MIB', MibScalar((1, 3, 6, 1, 2, 1, 1, 1), v2c.OctetString()),
    MyStaticMibScalarInstance((1, 3, 6, 1, 2, 1, 1, 1), (0,), v2c.OctetString())
)

# Register SNMP Applications at the SNMP engine for particular SNMP context
cmdrsp.GetCommandResponder(snmpEngine, snmpContext)
...
# snmpwalk -v3 -u usr-md5-none -l authNoPriv -A authkey1 127.0.0.1 .1.3.6
iso.3.6.1.2.1.1.1.0 = STRING: "Hello World! It's currently: 2019-11-18 16:02:43.796005"
iso.3.6.1.2.1.1.2.0 = OID: iso.3.6.1.4.1.20408
iso.3.6.1.2.1.1.3.0 = Timeticks: (494) 0:00:04.94
iso.3.6.1.2.1.1.4.0 = ""
iso.3.6.1.2.1.1.5.0 = ""
iso.3.6.1.2.1.1.6.0 = ""
iso.3.6.1.2.1.1.7.0 = INTEGER: 0
iso.3.6.1.2.1.1.8.0 = Timeticks: (0) 0:00:00.00
iso.3.6.1.2.1.11.1.0 = Counter32: 13
iso.3.6.1.2.1.11.2.0 = Counter32: 0
iso.3.6.1.2.1.11.3.0 = Counter32: 0
iso.3.6.1.2.1.11.4.0 = Counter32: 0
iso.3.6.1.2.1.11.5.0 = Counter32: 0
iso.3.6.1.2.1.11.6.0 = Counter32: 0
iso.3.6.1.2.1.11.8.0 = Counter32: 0
iso.3.6.1.2.1.11.9.0 = Counter32: 0
If you really need that second context and independent tree you can create a generic controller to examine and send back whatever you want.
import datetime  # needed by EchoMibInstrumController.readVars (missing from the original snippet)

# Create an SNMP context with default ContextEngineId (same as SNMP engine ID)
snmpContext = context.SnmpContext(snmpEngine)


class EchoMibInstrumController(instrum.AbstractMibInstrumController):
    """Very basic Management Instrumentation Controller without any Managed
    Objects attached.

    It supports only GETs and reacts to one target OID; any other OID is
    simply echoed back in the response var-binds.
    """

    def readVars(self, varBinds, acInfo=(None, None)):
        retItem = []
        for ov in varBinds:
            if str(ov[0]) == '1.3.6.1.2.1.1.1.0':
                # Target OID (sysDescr.0): answer with a fresh timestamp.
                currentDT = datetime.datetime.now()
                retItem.extend([(ov[0], v2c.OctetString('Hello World! It\'s currently: %s' % str(currentDT)))])
            else:
                retItem.extend([(ov[0], v2c.OctetString('You queried OID %s' % ov[0]))])
        return retItem


# Create multiple independent trees of MIB managed objects (empty so far)
mibTreeA = EchoMibInstrumController()
mibTreeB = instrum.MibInstrumController(builder.MibBuilder())

# Register MIB trees at distinct SNMP Context names
snmpContext.registerContextName(v2c.OctetString('context-a'), mibTreeA)
snmpContext.registerContextName(v2c.OctetString('context-b'), mibTreeB)
snmpget -v3 -u usr-md5-none -l authNoPriv -A authkey1 -n context-a 127.0.0.1 .1.3.6.1.2.1.1.1.0
# snmpget -v3 -u usr-md5-none -l authNoPriv -A authkey1 127.0.0.1 .1.3.6.1.2.1.1.1.0
iso.3.6.1.2.1.1.1.0 = STRING: "PySNMP engine version 4.4.12, Python 3.5.2 (default, Jul 5 2016, 12:43:10) [GCC 5.4.0 20160609]"
# snmpget -v3 -u usr-md5-none -l authNoPriv -A authkey1 -n context-a 127.0.0.1 .1.3.6.1.2.1.1.1.0
iso.3.6.1.2.1.1.1.0 = STRING: "Hello World! It's currently: 2019-11-18 15:56:26.598242"
# snmpget -v3 -u usr-md5-none -l authNoPriv -A authkey1 -n context-a 127.0.0.1 .1.3.6.1.2.1.1.2.0
iso.3.6.1.2.1.1.2.0 = STRING: "You queried OID 1.3.6.1.2.1.1.2.0"
Related
I am struggling to make the following code work.
I want to register an OID and wait for a GET request, but the code has an error that I can't solve.
from pysnmp.carrier.asyncio.dgram import udp
from pysnmp.entity.rfc3413 import ntfrcv
from pysnmp.smi import builder, exval, view, error
from pysnmp.proto import rfc1902
from functools import partial
# create SNMP engine instance
# NOTE(review): `engine` (and `config`, used further below) are never imported
# in this snippet -- it needs `from pysnmp.entity import engine, config` to run.
snmpEngine = engine.SnmpEngine()
# load custom MIB
mibBuilder = snmpEngine.getMibBuilder()
mibPath = mibBuilder.getMibSources() + (builder.DirMibSource('/home/pi/'),)
mibBuilder.setMibSources(*mibPath)
mibBuilder.loadModules('MY-MIB')
# NOTE(review): redundant self-assignment -- getMibBuilder() above already
# returned the engine's own builder object.
snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder = mibBuilder
# create UDP server transport
udpServerTransport = udp.UdpTransport().openServerMode(('0.0.0.0', 161))
# Intended per-OID GET handler: returns an OctetString for the recognized OID,
# noSuchObject otherwise.  It is never actually wired up correctly below.
def custom_oid_handler(snmp_engine, oid, tag, value):
# Handle the request for the custom OID here
# oid: the OID being requested
# tag: the type of the requested OID (integer, string, etc.)
# value: the value associated with the requested OID (if any)
#1.3.6.1.4.1.59270.1.1
# For example, to return a string value for the OID:
if oid == (1, 3, 6, 1, 4, 1, 123456, 1):
return rfc1902.OctetString("Custom value for OID 1.3.6.1.4.1.123456.1")
else:
# If the OID is not recognized, return an error
return exval.noSuchObject
# register custom OID handler
# NOTE(review): this is the line that fails -- MsgAndPduDispatcher has no
# getNextReqMsgID() method (see the AttributeError quoted below); this API
# simply does not exist in pysnmp.
reqMsgID = snmpEngine.msgAndPduDsp.getNextReqMsgID()
# NOTE(review): registerContextEngineId expects (contextEngineId, pduTypes,
# processPdu) -- this call would not register a GET handler even if the line
# above worked.  Serving an OID is done via cmdrsp.GetCommandResponder plus a
# context.SnmpContext / MIB instrumentation, as in the examples above.
snmpEngine.msgAndPduDsp.registerContextEngineId(
snmpEngine.snmpContext,
lambda eng, pdu, msgID=reqMsgID, cbFun=partial(custom_oid_handler, snmp_engine): (
(msgID,), (snmpEngine.msgAndPduDsp.ANY_TYPE,), (snmpEngine.msgAndPduDsp.CONTEXT_EXACT_MATCH, 0, 0)
)
)
# add a community string for SNMPv1
community = config.CommunityData('public')
config.addV1System(snmpEngine, 'my-area', community)
# allow read access to the entire OID tree under 1.3.6
config.addVacmUser(snmpEngine, 1, 'my-area', 'noAuthNoPriv', (1, 3, 6), (1, 3, 6))
# start the SNMP server to listen for requests
# NOTE(review): ntfrcv.NotificationReceiver handles incoming TRAPs/INFORMs,
# not GET requests -- presumably cmdrsp responders were intended here; verify.
ntfrcv.NotificationReceiver(snmpEngine).transportDispatcher.registerTransport(udpServerTransport)
ntfrcv.NotificationReceiver(snmpEngine).transportDispatcher.jobStarted(1)
print("SNMP server listening on port 161")
The error is:
reqMsgID = snmpEngine.msgAndPduDsp.getNextReqMsgID()
AttributeError: 'MsgAndPduDispatcher' object has no attribute 'getNextReqMsgID'
Any ideas?
I am trying to import the snmpSessionBaseClass python module in a script I am running, but I do not have the module installed and I can't seem to find where to download it. Does anyone know the pip or yum command to download and install this module? Thanks!
import netsnmp
sys.path.insert(1, os.path.join(sys.path[0], os.pardir))
from snmpSessionBaseClass import add_common_options, get_common_options, verify_host, get_data
from pynag.Plugins import PluginHelper,ok,critical
The following code needs to be added to a file called snmpSessionBaseClass.py and that file needs to be placed in a directory that is in pythons path.
#!/usr/bin/env python
# Copyright (C) 2016 rsmuc <rsmuc#mailbox.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with health_monitoring_plugins. If not, see <http://www.gnu.org/licenses/>.
import pynag
import netsnmp
import os
import sys
# File descriptors used to silence stdout at the OS level for the duration
# of a single call.
dev_null = os.open(os.devnull, os.O_WRONLY)
tmp_stdout = os.dup(sys.stdout.fileno())


def dev_null_wrapper(func, *a, **kwargs):
    """Call *func* with stdout redirected to /dev/null and return its result.

    Useful because netsnmp writes to stdout, which would otherwise disturb
    the Icinga plugin result in some cases.
    """
    os.dup2(dev_null, sys.stdout.fileno())
    result = func(*a, **kwargs)
    sys.stdout.flush()
    os.dup2(tmp_stdout, sys.stdout.fileno())
    return result
def add_common_options(helper):
    """Register the command-line options shared by all SNMP plugins."""
    common = [
        (('-H',),
         dict(help="Hostname or ip address", dest="hostname")),
        (('-C', '--community'),
         dict(dest='community',
              help='SNMP community of the SNMP service on target host.',
              default='public')),
        (('-V', '--snmpversion'),
         dict(dest='version', help='SNMP version. (1 or 2)',
              default=2, type='int')),
    ]
    for flags, attrs in common:
        helper.parser.add_option(*flags, **attrs)
def get_common_options(helper):
    """Return the parsed (host, version, community) triple from *helper*."""
    opts = helper.options
    return opts.hostname, opts.version, opts.community
def verify_host(host, helper):
    """Abort the plugin (UNKNOWN) when *host* is missing or unreachable via SNMP."""
    if host == "" or host is None:
        helper.exit(summary="Hostname must be specified",
                    exit_code=pynag.Plugins.unknown,
                    perfdata='')

    # NOTE: the session is built from helper.options rather than the *host*
    # argument -- callers pass the same value through both.
    session = dev_null_wrapper(netsnmp.Session,
                               DestHost=helper.options.hostname,
                               Community=helper.options.community,
                               Version=helper.options.version)
    try:
        # Works around lacking error handling in the netsnmp package:
        # a NULL session pointer means the connection could not be set up.
        if session.sess_ptr == 0:
            helper.exit(summary="SNMP connection failed",
                        exit_code=pynag.Plugins.unknown,
                        perfdata='')
    except ValueError as error:
        helper.exit(summary=str(error),
                    exit_code=pynag.Plugins.unknown,
                    perfdata='')
# make a snmp get; if it fails (or returns nothing) exit the plugin
def get_data(session, oid, helper, empty_allowed=False):
    """SNMP-get *oid* via *session*; exit the plugin (UNKNOWN) on no data.

    An empty (but non-None) value is tolerated only when *empty_allowed*.
    """
    varbind_list = netsnmp.VarList(netsnmp.Varbind(oid))
    value = session.get(varbind_list)[0]
    if value is None or (not empty_allowed and not value):
        helper.exit(summary="snmpget failed - no data for host "
                    + session.DestHost + " OID: " + oid,
                    exit_code=pynag.Plugins.unknown,
                    perfdata='')
    return value
# make a snmp get, but do not exit the plugin if it returns nothing
# be careful! This function does not exit the plugin if the snmp get fails!
def attempt_get_data(session, oid):
    """SNMP-get *oid* and return its value (possibly None); never exits."""
    varbind_list = netsnmp.VarList(netsnmp.Varbind(oid))
    return session.get(varbind_list)[0]
# make a snmp walk; if it fails (or returns nothing) exit the plugin
def walk_data(session, oid, helper):
    """SNMP-walk *oid*; exit the plugin (UNKNOWN) when nothing comes back.

    Returns a (values, tags) pair aligned by index.
    """
    varbind_list = netsnmp.VarList(netsnmp.Varbind(oid))
    data = list(session.walk(varbind_list))
    if len(data) == 0:
        helper.exit(summary="snmpwalk failed - no data for host " + session.DestHost
                    + " OID: " + oid,
                    exit_code=pynag.Plugins.unknown,
                    perfdata='')
    tag = [varbind_list[i].tag for i in range(len(data))]
    return data, tag
# make a snmp walk, but do not exit the plugin if it returns nothing
# be careful! This function does not exit the plugin if the snmp walk fails!
def attempt_walk_data(session, oid):
    """SNMP-walk *oid*; return (values, tags) -- possibly empty, never exits."""
    varbind_list = netsnmp.VarList(netsnmp.Varbind(oid))
    data = list(session.walk(varbind_list))
    tags = [varbind_list[i].tag for i in range(len(data))]
    return data, tags
def state_summary(value, name, state_list, helper, ok_value = 'ok', info = None):
    """Translate numeric *value* via *state_list* and build plugin output.

    The status always goes into the long output; when it differs from
    *ok_value* it is also added to the summary and the plugin status is
    escalated to critical.  Returns (summary_output, long_output).
    """
    state_value = state_list[int(value)]
    info = info or ''
    summary_output = ''
    if state_value != ok_value:
        summary_output = '%s status: %s %s ' % (name, state_value, info)
        helper.status(pynag.Plugins.critical)
    long_output = '%s status: %s %s\n' % (name, state_value, info)
    return (summary_output, long_output)
def add_output(summary_output, long_output, helper):
    """Push collected output to *helper*.

    An empty summary is skipped -- adding it would leave empty separators
    (e.g. '. . . . .') in the summary report.
    """
    if summary_output != '':
        helper.add_summary(summary_output)
    helper.add_long_output(long_output)
I need to get a list of all tasks from an Ansible playbook and show them.
My problem is that conditions like ansible_os_family == "Debian" are not evaluated. I can see all tasks (as with ansible-playbook rplaybooks/main.yml --list-tasks), but I want only those that would actually be executed.
I see two ways:
I will check when to current ansible_os_family. I don't know how
to get it?
I will find way inside python-ansible execute this
conditions
I created a class that allows retrieving the playbook's tasks.
playbook.py:
import sys
import os
import stat
import json
import ansible.playbook
import ansible.constants as C
import ansible.utils.template
from ansible import errors
from ansible import callbacks
from ansible import utils
from ansible.color import ANSIBLE_COLOR, stringc
from ansible.callbacks import display
# Configure file logging when imported as a module (not when run as a script).
# The original snippet used `logging` without importing it -- fixed here.
import logging

if __name__ != '__main__':
    logging.basicConfig(format=u'%(levelname)-8s [%(asctime)s] %(message)s',
                        level=logging.DEBUG,
                        filename=u'/var/log/rderole.log')
# NOTE(review): Python 2 code (print statements) against the pre-2.0 Ansible
# internal API (ansible.playbook.PlayBook / callbacks / utils).  It will not
# run on Python 3 or Ansible 2.x+.
class PyPlaybook(object):
# Tags stripped from the final result ('all' is implicit on every task).
__filtered_tags=['all']
# NOTE(review): mutable default arguments (hosts=[...], params={}) are shared
# across calls -- safe only while callers always pass their own params dict.
def _add_inventory(self,hosts=["127.0.0.1"],params={}):
""" create inventory obj and add it to params """
playbook=params["playbook"]
inventory=ansible.inventory.Inventory(hosts)
inventory.set_playbook_basedir(os.path.dirname(playbook))
# Stats and callback objects required by the PlayBook constructor.
stats = callbacks.AggregateStats()
playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)
# Mutates (and returns) the caller's params dict in place.
params["inventory"]=inventory
params["stats"]=stats
params["callbacks"]=playbook_cb
params["runner_callbacks"]=runner_cb
return params
def _playbook_for_hosts(self,hosts=["127.0.0.1"],params={}):
""" return playbook object with inventory """
# interface to Playbook class
"""pb=ansible.playbook.PlayBook(
playbook = playbook,
host_list = host_list,
module_path = module_path,
forks = forks ,
timeout = timeout,
remote_user = remote_user,
remote_pass = remote_pass,
sudo_pass = sudo_pass,
remote_port = remote_port,
transport = transport,
private_key_file = private_key_file,
callbacks = callbacks,
runner_callbacks = runner_callbacks,
stats = stats,
sudo = sudo,
sudo_user = sudo_user,
extra_vars = extra_vars,
only_tags = only_tags,
skip_tags = skip_tags,
subset = subset,
inventory = inventory,
check = check,
diff = diff,
any_errors_fatal = any_errors_fatal,
su = su,
su_user = su_user,
su_pass = su_pass ,
vault_password = vault_password,
force_handlers = force_handlers,
)"""
playbook_params=self._add_inventory(hosts,params)
pb=ansible.playbook.PlayBook(**playbook_params)
return pb
# Build {tag: [task names]} across all plays, mirroring the tag-listing
# logic of the ansible-playbook CLI (--list-tasks / --list-tags).
# NOTE(review): task.when is only *printed* below, never evaluated -- this
# is why conditional tasks still appear in the listing.
def get_tags(self,hosts=["127.0.0.1"],params={}):
pb=self._playbook_for_hosts(hosts,params)
playnum = 0
tags_array={}
for (play_ds, play_basedir) in zip(pb.playbook, pb.play_basedirs):
playnum += 1
play = ansible.playbook.Play(pb, play_ds, play_basedir)
label = play.name
matched_tags, unmatched_tags = play.compare_tags(pb.only_tags)
# Remove skipped tasks
matched_tags = matched_tags - set(pb.skip_tags)
unmatched_tags.discard('all')
# Skip plays referencing tags that exist nowhere in the play.
unknown_tags = ((set(pb.only_tags) | set(pb.skip_tags)) -
(matched_tags | unmatched_tags))
if unknown_tags:
continue
print ' play #%d (%s):' % (playnum, label)
for task in play.tasks():
# Keep tasks selected by only_tags and not excluded by skip_tags.
if (set(task.tags).intersection(pb.only_tags) and not
set(task.tags).intersection(pb.skip_tags)):
if getattr(task, 'name', None) is not None:
# meta tasks have no names
print ' %s %s %s' % (task.tags,task.name,task.when)
for task_tag in task.tags:
#print '>> %s %s' % (task_tag,task.name)
# NOTE(review): bare except presumably stands in for KeyError
# (first task seen for a tag) -- it also hides any other error.
try:
tags_array[task_tag].append(task.name)
except:
tags_array[task_tag]=[]
tags_array[task_tag].append(task.name)
# Drop filtered tags (e.g. 'all'); bare excepts swallow every failure here.
try:
for tag in self.__filtered_tags:
try:
del tags_array[tag]
except:
pass
except:
pass
print json.dumps(tags_array, sort_keys=True, indent=4, separators=(',', ': '))
return tags_array
if __name__ == '__main__':
    # Example driver: list tags/tasks of the RDE role playbook against localhost.
    lister = PyPlaybook()
    options = {
        'playbook': '/playbooks/rde/main.yml',
        'subset': None,
        'private_key_file': None,
        'skip_tags': None,
        'diff': False,
        'check': False,
        'remote_user': 'root',
        'forks': 5,
        'transport': 'local',
        'timeout': 10,
        'module_path': None,
    }
    # 'only_tags': ['base'], 'skip_tags': ['base']
    # lister.run_playbook(["127.0.0.1"], options)
    lister.get_tags(["127.0.0.1"], options)
/playbooks/rde/main.yml:
# Dispatch to an OS-specific task file.  `when` on an include is evaluated
# per host at run time, which is why --list-tasks still shows tasks from
# both included files.
- include: debian.yml
when: "ansible_os_family == 'Debian'"
- include: redhat.yml
when: "ansible_os_family == 'RedHat'"
redhat.yml
---
# Install the x2go server from the distro repositories on RedHat-family hosts.
- name: Install x2go application RedHat
yum: name=x2goserver state=present
when: ansible_os_family == "RedHat"
tags:
- remote-access-x2go
debian.yml
---
# NOTE: the original file repeated `apt_repository:` (and `apt:`) inside a
# single task.  A YAML mapping cannot contain duplicate keys -- only the last
# one takes effect -- so the deb-src repository and the x2goserver-xsession
# package were silently dropped.  Loop over the items instead.
- name: Add x2go repository
  apt_repository: repo='{{ item }}' state=present
  with_items:
    - 'deb http://ppa.launchpad.net/x2go/stable/ubuntu precise main'
    - 'deb-src http://ppa.launchpad.net/x2go/stable/ubuntu precise main'
  when: ansible_os_family == "Debian"
  tags:
    - remote-access-x2go

- name: Install x2go application
  apt: name={{ item.name }} update_cache={{ item.update_cache }}
  with_items:
    - { name: x2goserver, update_cache: 'yes' }
    - { name: x2goserver-xsession, update_cache: 'no' }
  when: ansible_os_family == "Debian"
  tags:
    - remote-access-x2go
python playbook.py
play #1 (RDE Role):
['all', 'remote-access-x2go'] Add x2go repository jinja2_compare ansible_os_family == "Debian"
['all', 'remote-access-x2go'] Install x2go application jinja2_compare ansible_os_family == "Debian"
['all', 'remote-access-x2go'] Install x2go application RedHat jinja2_compare ansible_os_family == "RedHat"
{
"remote-access-x2go": [
"Add x2go repository",
"Install x2go application",
"Install x2go application RedHat"
]
}
I don't think that's possible. Evaluation of when clauses is considered part of task-execution itself. Only way would be to hack the code.
Perhaps post the question in ansible group where I expect you'll get a more confident yes/no answer.
Instead of using when, you should include the file directly, e.g.
- include: "{{ ansible_os_family }}.yml"
And make sure you've the relevant files in place for supported systems.
Check also: Define Ansible variable in a role with OS specific default which can be easily overridden
I am very new to Python and am trying to work on this script, which retrieves data from multiple FTP sites and downloads yesterday's data (according to the date directory) to my local folder. If the retrieval fails on some day, the script does not update that day's records and moves on to the next day. I want to synchronize the files so that even if a day is missed, its new files are still synced to the local folder. I have looked at rsync, but I need your help to run it from the script. This is my script.
MAX_CHILDREN = 16
ftp_site_prog = "/usr/local/bin/ftp_site.py"


class SpawnQ:
    """Queue of ftp site ids, drained by forking at most *max_pids*
    concurrent ftp_site.py worker processes."""

    def __init__(self, max_pids):
        self.max_pids = max_pids
        self.queue = []
        # Basename of the worker script, used to count running copies in ps.
        tmp = re.split("/", ftp_site_prog)
        self.my_name = tmp[-1]

    def addQ(self, site_id):
        """Append a site id to the work queue."""
        self.queue.append(site_id)
        return

    def runQ(self):
        """Drain the queue, forking one child per site id.

        Throttles by counting live ftp_site.py processes via ps/grep and
        sleeping 10 seconds whenever the process budget is exhausted.
        """
        while len(self.queue) != 0:
            # Check how many sessions are running.
            cmd = """ps -ef | grep "%s" | grep -v grep""" % self.my_name
            num_pids = 0
            for line in os.popen(cmd).readlines():
                num_pids = num_pids + 1
            if num_pids < self.max_pids:
                site_id = self.queue.pop()
                fpid = os.fork()
                if fpid:
                    # Parent: opportunistically reap the child without blocking.
                    os.waitpid(fpid, os.WNOHANG)
                else:
                    # Child: exec the ftp_site worker with the site id argument.
                    arg_string = "%s" % site_id
                    args = [arg_string]
                    os.execvp(ftp_site_prog, (ftp_site_prog,) + tuple(args))
                    # NOTE(review): the original had a stray prose line here about
                    # invoking rsync (os.system("rsync ...")).  execvp never
                    # returns on success, so any rsync call must happen *before*
                    # it (or replace it); sys.exit(0) below only runs if the
                    # exec itself fails.
                    sys.exit(0)
            else:
                # All worker slots are busy -- wait for a spare process.
                time.sleep(10)
        return
# Get a list of the sites
# NOTE(review): `nprint` is a project-local module not shown in this snippet;
# `re`, `os`, `time` and `datetime` used below are also not imported here --
# presumably the real file has the imports at the top.  Verify.
db_obj = nprint.celDb()
site_list = db_obj.get_all_site_ids()
# Queue every site and fork workers, at most MAX_CHILDREN at a time.
myQ = SpawnQ(MAX_CHILDREN)
for site_id in site_list:
myQ.addQ(site_id)
myQ.runQ()
# Wait until we only have the parent left
# Check how many sessions are running
tmp = re.split("/",ftp_site_prog)
ftp_name = tmp[-1]
cmd = """ps -ef | grep "%s" | grep -v grep""" % ftp_name
# Poll ps once a minute until no ftp_site.py processes remain.
num_pids = MAX_CHILDREN
while (num_pids > 0):
num_pids = 0
for line in os.popen(cmd).readlines():
num_pids = num_pids + 1
time.sleep(60)
# Compute yesterday's date (the directory the downloads are organized by).
today = datetime.date.today()
daydelta = datetime.timedelta(days=1)
yesterday = today - daydelta
Much of this can be accomplished with the ftplib module for the retrieval of files from standard FTP servers. If you are dealing with SFTP servers you can use the paramiko library.
I am using this piece of code to fetch documents from Sphinx, but the Sphinx Query always returns None. However, if I execute the code from the command line, I get the correct result. What is wrong?
def search_query(query, offset=0):
    """Run a Sphinx full-text *query* against the rss_item index.

    Returns the list of matching document ids, preserving Sphinx ranking.
    Returns an empty list when the search daemon is unreachable or the
    query fails -- SphinxClient.Query() returns None in that case (check
    cl.GetLastError() for the reason, e.g. a connection refused by SELinux).
    """
    host = 'localhost'
    port = 9312
    index = 'rss_item'
    limit = 30

    cl = SphinxClient()
    cl.SetServer(host, port)
    cl.SetWeights([100, 1])
    cl.SetMatchMode(SPH_MATCH_EXTENDED)
    # cl.SetSortMode(SPH_SORT_TIME_SEGMENTS)
    if limit:
        # (offset, per-page limit, max matches to retrieve server-side)
        cl.SetLimits(offset, limit, max(limit, 1000))

    res = cl.Query(query, index)
    # Query() returns None on connection/query errors; the original code
    # crashed here with "TypeError: 'NoneType' object is not subscriptable".
    if not res or 'matches' not in res:
        return []
    return [item['id'] for item in res['matches']]
# this is django view
# this is a django view
def search(request):
    """Full-text search view: fetch matching Items, keeping Sphinx's ranking."""
    q = request.GET.get('q', '')
    offset = int(request.GET.get('older', 0))

    doc_ids = search_query(q, offset)

    # Re-fetch the rows, then restore the order Sphinx returned them in.
    items_by_id = {obj.id: obj for obj in Item.objects.filter(id__in=doc_ids).all()}
    result = [items_by_id[doc_id] for doc_id in doc_ids]

    return render_to_response('rss/_items.html',
                              {'latest_items': result, 'offset': offset + 30, 'q': q},
                              context_instance=RequestContext(request))
Server : centos6, sphinx: 2.0.5, django: 1.4.2 , apache/wsgi
I resolved the issue by turning SELinux off and restarting the server. I traced the Apache processes with this command:
ps auxw | grep httpd | awk '{print"-p " $2}' | xargs strace
and saw "permission denied" errors when Apache tried to access Sphinx and memcached.