I am designing a web page named display.html to display course content data in grid view.
Following link shows the http://www.prepbootstrap.com/bootstrap-theme/dark-admin/preview/bootstrap-grid.html type of display I needed.
First row will contain course name, second will contain sub section name and so on.
I'm using django python. I've written a function in views.py for retrieving data from mongodb.
The function returns a dictionary contains mapping of course name with list of chapter name, chapter name with list of sub-section name, and so on and renders it on web page display.html but I'm not able to display the data properly on html page.
My views.py functions are:
def get_course_structure(course_lst):
final_data = dict()
connection = MongoClient("server_ip", port)
database_name = 'edxapp'
database = connection[database_name]
cursor = database.modulestore.aggregate([{ "$match": {"_id.course":"CS000", "_id.category":"chapter"} },
{ "$project": { "metadata": 1, "definition": 1 } } ])
chapter = []
lst_chapter = []
for data in cursor:
chapter.append(data)
lst_chapter.append(data['metadata']['display_name'])
#print "CS000", lst_chapter
final_data= {}
final_data["CS000"] = lst_chapter
#for data in chp:
# print data['metadata']['display_name']
sequential = []
for data in chapter:
lst_children = data['definition']['children']
#print lst_children
lst_seq = []
for child in lst_children:
child = child.split('/')
#print child[5]
cur = database.modulestore.aggregate([{ "$match": {"_id.name":child[5]} },
{ "$project": { "metadata": 1, "definition": 1 } } ])
for rec in cur:
#print rec
sequential.append(rec)
lst_seq.append(rec['metadata']['display_name'])
#print data['metadata']['display_name'], lst_seq
final_data[data['metadata']['display_name']] = lst_seq
#for data in sequential:
# print data, "\n"
vertical = []
for data in sequential:
lst_children = data['definition']['children']
lst_ver = []
for child in lst_children:
child = child.split('/')
cur = database.modulestore.aggregate([{ "$match": {"_id.name":child[5]} },
{ "$project": { "metadata": 1, "definition": 1 } } ])
for rec in cur:
vertical.append(rec)
lst_ver.append(rec['metadata']['display_name'])
#print data['metadata']['display_name'], lst_ver
final_data[data['metadata']['display_name']] = lst_ver
#for data in vertical:
# print data, "\n"
for key in final_data:
print key, final_data[key]
return final_data
and
def get_dashboard(request):
courses_lst = get_student_enrolled_courses(request.GET['student_id'])
course_data = get_course_structure(courses_lst)
return render(request,'mis/display.html', {'course_data':course_data, 'student_id':request.GET['student_id'] })
Any help for doing this.
Related
I'm trying to produce a JSON format for a given entity and I'm having an issue getting the dictionary to NOT overwrite itself or become empty. This is pulling rows from a table in a MySQL database and attempting to produce JSON result from the query.
Here is my function:
def detail():
student = 'John Doe'
conn = get_db_connection()
cur = conn.cursor()
sql = ("""
select
a.student_name,
a.student_id,
a.student_homeroom_name,
a.test_id,
a.datetaken,
a.datecertified,
b.request_number
FROM student_information a
INNER JOIN homeroom b ON a.homeroom_id = b.homeroom_id
WHERE a.student_name = '""" + student + """'
ORDER BY datecertified DESC
""")
cur.execute(sql)
details=cur.fetchall()
dataset = defaultdict(dict)
case_dataset = defaultdict(dict)
case_dataset = dict(case_dataset)
for student_name, student_id, student_homeroom_name, test_id, datetaken, datecertified, request_number in details:
dataset[student_name]['student_id'] = student_id
dataset[student_name]['student_homeroom_name'] = student_homeroom_name
case_dataset['test_id'] = test_id
case_dataset['datetaken'] = datetaken
case_dataset['datecertified'] = datecertified
case_dataset['request_number'] = request_number
dataset[student_name]['additional_information'] = case_dataset
case_dataset.clear()
dataset= dict(dataset)
print(dataset)
cur.close()
conn.close()
I tried a few different ways but nothing seems to work. What I'm getting is nothing in the additonal_information key. What I'm getting is this:
{
"John Doe": {
"student_id": "1234",
"student_homeroom_name": "HR1",
"additional_information": []
}
}
What I'm expecting is something similar to the below JSON. However, I'm torn if this is even correct. Each student will have one to many test_id and I will need to iterate through them in my application.
{
"John Doe": {
"student_id": "1234",
"student_homeroom_name": "HR1",
"additional_information": [
{
"test_id": "0987",
"datetaken": "1-1-1970",
"datecertified": "1-2-1970",
"request_number": "5643"
},
{
"test_id": "12343",
"datetaken": "1-1-1980",
"datecertified": "1-2-1980",
"request_number": "39807"
}
]
}
}
Removing the clear() from the function produces this JSON:
{
"John Doe": {
"student_id": "1234",
"student_homeroom_name": "HR1",
"additional_information": [
{
"test_id": "0987",
"datetaken": "1-1-1970",
"datecertified": "1-2-1970",
"request_number": "5643"
},
{
"test_id": "0987",
"datetaken": "1-1-1970",
"datecertified": "1-2-1970",
"request_number": "5643"
}
]
}
}
lists are mutable objects. Which means that list's are passed by reference.
when you set
dataset[student]['additional_information'] = case_dataset
case_dataset.clear()
you're setting the list and then clearing it. So the list inside additional_information is also cleared.
Copy the list when setting it:
dataset[student]['additional_information'] = case_dataset[:]
case_dataset.clear()
Thanks everyone for the guidance and pointing me in the right direction.
I have what I'm looking for now. Based on some of the comments and troubleshooting, I updated my code. Here is what I did:
I added back additional_dataset as a list
Removed case_dataset = defaultdict(dict) and case_dataset = dict(case_dataset) and replaced it with case_dataset = {}.
Updated dataset[student_name]['additional_information'] = case_dataset with dataset[student_name]['additional_information'] = additional_dataset
Replaced case_dataset.clear() with case_dataset = {}
Here is my new code now
def detail():
student = 'John Doe'
conn = get_db_connection()
cur = conn.cursor()
sql = ("""
select
a.student_name,
a.student_id,
a.student_homeroom_name,
a.test_id,
a.datetaken,
a.datecertified,
b.request_number
FROM student_information a
INNER JOIN homeroom b ON a.homeroom_id = b.homeroom_id
WHERE a.student_name = '""" + student + """'
ORDER BY datecertified DESC
""")
cur.execute(sql)
details=cur.fetchall()
dataset = defaultdict(dict)
case_dataset = {} #2 - Updated to just dict
additional_dataset = [] #1 - added back additional_dataset as a list
for student_name, student_id, student_homeroom_name, test_id, datetaken, datecertified, request_number in details:
dataset[student_name]['student_id'] = student_id
dataset[student_name]['student_homeroom_name'] = student_homeroom_name
case_dataset['test_id'] = test_id
case_dataset['datetaken'] = datetaken
case_dataset['datecertified'] = datecertified
case_dataset['request_number'] = request_number
dataset[student_name]['additional_information'] = additional_dataset #3 - updated to additional_dataset
case_dataset = {} #4 - updated to clear with new dict
dataset= dict(dataset)
print(dataset)
cur.close()
conn.close()
This is what it produces now. This is a much better structure then what I was previously expecting.
{
"John Doe": {
"student_id": "1234",
"student_homeroom_name": "HR1",
"additional_information": [
{
"test_id": "0987",
"datetaken": "1-1-1970",
"datecertified": "1-2-1970",
"request_number": "5643"
},
{
"test_id": "12343",
"datetaken": "1-1-1980",
"datecertified": "1-2-1980",
"request_number": "39807"
}
]
}
}
I've got solution to add table without border suggested by Tanaike but I'm still facing issues in indexing.
I want to insert data in the document in the following order (function - insert_data(file_id)) -
Insert an image in a document (Index = 1)
Insert text in a document (index = 2)
Insert table in a document having invisible borders (index = 3)
Insert text in the document (index = 4)
Insert table in a document again having invisible borders (index = 5)
Insert new line (index = 6)
Insert image in a document (index = 7)
The code I'm trying is-
import io
from gdoctableapppy import gdoctableapp
SERVICE_FILENAME = 'C:/Users/XYZ/Testpython/service_account.json' # set path to service account filename
from googleapiclient.discovery import build
from google.oauth2 import service_account
from googleapiclient.http import MediaIoBaseDownload, MediaFileUpload
credentials = service_account.Credentials.from_service_account_file(SERVICE_FILENAME,
scopes=['https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/documents']
)
docs = build('docs', 'v1', credentials=credentials)
drive = build('drive', 'v3', credentials=credentials)
def create_file(file_name):
file_metadata = {
"title": file_name,
"body": {}
}
file = docs.documents().create(body=file_metadata).execute()
print('File ID: %s' % file.get('documentId'))
file_id = file.get('documentId')
try:
permission = {
"role": "writer",
"type": "user",
'emailAddress': 'xyz#gmail.com'
}
result = drive.permissions().create(fileId=file_id, body=permission).execute()
print(result)
return file_id
except Exception as e:
print('An error occurred:', e)
return None
def insert_data(file_id):
requests = []
values = [['Name of the Client/Organization', 'XYZ'], ['Industry', 'Software']]
requests.append(insert_table_data(file_id, values, index=3))
values2 = [['Country', 'India'], ['State', 'UP']]
requests.append(insert_table_data(file_id, values2, index=5))
requests.append(insert_image(index=1))
requests.append(insert_text(2, '\ntext\n'))
requests.append(insert_text(4, '\nDemo text\n'))
requests.append(insert_text(6, '\n'))
requests.append(insert_image(index=7))
result = docs.documents().batchUpdate(documentId=file_id, body={'requests': requests}).execute()
def insert_image(index):
image_data = {
'insertInlineImage': {
'location': {
'index': index
},
'uri':
'https://www.oberlo.com/media/1603970279-pexels-photo-3.jpg?fit=max&fm=jpg&w=1824',
'objectSize': {
'height': {
'magnitude': 350,
'unit': 'PT'
},
'width': {
'magnitude': 350,
'unit': 'PT'
}
}
}
}
return image_data
def insert_text(index, text):
text_data = {
"insertText":
{
"text": text,
"location":
{
"index": index
}
}
}
return text_data
def insert_table_data(file_id, values, index):
documentId = file_id
resource = {
"oauth2": credentials,
"documentId": documentId,
"rows": len(values),
"columns": len(values[0]),
# "append": True,
"createIndex": index,
"values": values,
}
gdoctableapp.CreateTable(resource)
resource = {
"oauth2": credentials,
"documentId": documentId,
}
res = gdoctableapp.GetTables(resource)
obj = {"color": {"color": {}}, "dashStyle": "SOLID", "width": {"magnitude": 0, "unit": "PT"}}
data = {
"updateTableCellStyle": {
"tableCellStyle": {
"borderBottom": obj,
"borderTop": obj,
"borderLeft": obj,
"borderRight": obj,
},
"tableStartLocation": {
"index": res['tables'][-1]['tablePosition']['startIndex']
},
"fields": "borderBottom,borderTop,borderLeft,borderRight"
}
}
# docs.documents().batchUpdate(documentId=documentId, body={'requests': requests}).execute()
return data
def download_as_docx(file_id):
results = drive.files().get(fileId=file_id, fields="id, name, mimeType, createdTime").execute()
docMimeType = results['mimeType']
mimeTypeMatchup = {
"application/vnd.google-apps.document": {
"exportType": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", "docExt": "docx"
}
}
exportMimeType = mimeTypeMatchup[docMimeType]['exportType']
# docExt = mimeTypeMatchup[docMimeType]['docExt']
docName = results['name']
request = drive.files().export_media(fileId=file_id,
mimeType=exportMimeType) # Export formats : https://developers.google.com/drive/api/v3/ref-export-formats
# fh = io.FileIO(docName + "." + docExt, mode='w')
fh = io.FileIO(docName, mode='w')
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
print("Download %d%%." % int(status.progress() * 100))
def download_as_pdf(file_id, file_name):
request = drive.files().export_media(fileId=file_id,
mimeType='application/pdf')
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
print("Download %d%%." % int(status.progress() * 100))
fh.seek(0)
filename = file_name.split('.docx')[0] + '.pdf'
with open(filename, 'wb') as fx:
fx.write(fh.getvalue())
def delete_gdrive_file(file_id):
"""Deleted file on Google Drive
:param file_id: ID of Google Drive file
"""
response = drive.files().delete(fileId=file_id).execute()
print(response)
if __name__ == '__main__':
file_name = 'Data.docx'
file_id = create_file(file_name)
insert_data(file_id)
download_as_docx(file_id)
download_as_pdf(file_id, file_name)
delete_gdrive_file(file_id)
Error:
returned "Invalid requests[0].insertTable: Index 4 must be less than the end index of the referenced segment, 2.". Details: "Invalid requests[0].insertTable: Index 4
must be less than the end index of the referenced segment, 2.">
I guess end index of the table goes to 67 but even if I try to insert new data at index 68, it either appends in the last cell of the table or it throws indexing error sometimes.
I should I make the whole data insertion flow dynamic in the google docs.
Modification points:
The library gdoctableapp creates the table by one call. By this, when you request the flow of your question, the index is changed for the tables. I thought that this is the reason of your issue.
In this case, how about the following modification?
Modified script:
Please modify insert_table_data as follows.
def insert_table_data(file_id, values, index):
documentId = file_id
resource = {
"oauth2": credentials,
"documentId": documentId,
"rows": len(values),
"columns": len(values[0]),
# "append": True,
"createIndex": index,
"values": values,
}
gdoctableapp.CreateTable(resource)
And also, please modify insert_data as follows.
def insert_data(file_id):
# Insert texts and images.
index = 1
requests = []
requests.append(insert_image(index))
index += 1
text1 = '\ntext\n'
requests.append(insert_text(index, text1))
index += len(text1)
table1 = index
text2 = '\nDemo text\n'
requests.append(insert_text(index, text2))
index += len(text2)
table2 = index
text3 = '\n'
requests.append(insert_text(index, text3))
index += len(text3)
requests.append(insert_image(index))
docs.documents().batchUpdate(documentId=file_id, body={'requests': requests}).execute()
# Create tables.
values2 = [['Country', 'India'], ['State', 'UP']]
insert_table_data(file_id, values2, table2)
values1 = [['Name of the Client/Organization', 'XYZ'], ['Industry', 'Software']]
insert_table_data(file_id, values1, table1)
# Remove borders of tables.
resource = {"oauth2": credentials, "documentId": file_id}
res = gdoctableapp.GetTables(resource)
obj = {"color": {"color": {}}, "dashStyle": "SOLID", "width": {"magnitude": 0, "unit": "PT"}}
reqs = []
for e in res['tables']:
data = {
"updateTableCellStyle": {
"tableCellStyle": {
"borderBottom": obj,
"borderTop": obj,
"borderLeft": obj,
"borderRight": obj,
},
"tableStartLocation": {
"index": e['tablePosition']['startIndex']
},
"fields": "borderBottom,borderTop,borderLeft,borderRight"
}
}
reqs.append(data)
docs.documents().batchUpdate(documentId=file_id, body={'requests': reqs}).execute()
In this modification, I separate the texts and images, and the tables. By this, the index of tables can be correctly retrieved.
Note:
This modified script is for your question. So when your actual situation is different from your question, this modified script might not be able to be directly used. So please be careful about this.
here is a CSV file :
year,product,price
2021,P01,50
2022,P03,60
2021,P02,30
I'm trying to create a JSON for every year with the list of product like this :
{
"year": "2021",
"products": {
"P02": 30,
"P01": 50
},
"processed": "true"
}
Here is my actual code :
import json
csv = """2021,P01,50
2022,P03,60
2021,P02,30
"""
response = {}
for line in csv.splitlines():
fields = line.split(",")
year, product, price = fields[0], fields[1], fields[2:]
if year not in response:
response[year] = {}
response[year][product] = price
print json.dumps(response)
This is the result I get :
{
"2021": {
"P02": [
"30"
],
"P01": [
"50"
]
},
"2022": {
"P03": [
"60"
]
}
}
Could you help me please to get the result I'm waiting for ?
I start to think that I should maybe use List to make it ...
If the same product in the same year does not have different values then you can create a structure like-
{
"2021": {
"P0": 50,
"P1": 30
},
"2022": {
"P0": 60
}
}
For creating a structure like that
import json
csv = """2021,P01,50
2022,P03,60
2021,P02,30
"""
response = {}
for line in csv.splitlines():
fields = line.split(",")
year, product, price = fields[0], fields[1], fields[2:]
year_response = response.get(year, {})
year_response[product] = price
response[year] = year_response
# iterate the dictionary and create your custom response
for year, year_response in response.items():
file_data = {}
file_date["year"] = year
file_data["products"] = year_response
file_data["processed"] = true
#TODO: add file_data to file now
If the same product in the same year has different values then you can simply use a list instead of a integer value for "P0"
I am making REST calls on a server. The first REST call gets all the projects and from that I store the project's IDs in an array.
Below is the JSON.
For e.g. it would return something like this:
[
{
"expand": "description,lead,url,projectKeys",
"self": "http://localhost:8080/rest/api/2/project/10101",
"id": "10101",
"key": "GR1",
"name": "Group1Project",
"avatarUrls": {
"48x48": "http://localhost:8080/secure/projectavatar?avatarId=10324",
"24x24": "http://localhost:8080/secure/projectavatar?size=small&avatarId=10324",
"16x16": "http://localhost:8080/secure/projectavatar?size=xsmall&avatarId=10324",
"32x32": "http://localhost:8080/secure/projectavatar?size=medium&avatarId=10324"
},
"projectTypeKey": "software"
}
]
Then I'm looping through that array and making another REST call for each project id(10101).
This gives me groups/users against that project.
For example:
{
"self": "http://localhost:8080/rest/api/2/project/10000/role/10100",
"name": "Developers",
"id": 10100,
"actors": [
{
"id": 10207,
"displayName": "group2",
"type": "atlassian-group-role-actor",
"name": "group2",
"avatarUrl": "http://localhost:8080/secure/useravatar?size=xsmall&avatarId=10123"
}
]
}
I want to get all the project IDs where name == group2.
Following is my Python code for all of this but it's not working.
import requests
ids = []
response = requests.get('http://localhost:8080/rest/api/2/project',
auth=('*', '*'))
data = response.json()
for line in data:
ids.append(line["id"])
print(ids)
# Check if group exists in Project roles.
# If it does, then save the project name in the list of arrays.
projectNames = []
for id in ids:
url = 'http://localhost:8080/rest/api/2/project/'+id+'/role/10100'
response = requests.get(url,
auth = ('*', '*'))
data = response.json()
if data.displayName == 'group2':
projectNames.append(["id"])
Could you please help me out how to do this?
Thank you.
Tayyab,
You need to do this. It will work.
for actor in data['actors']:
if actor['displayName']=='group2':
projectNames.append(id)
projectNames = []
for id in ids:
url = 'http://localhost:8080/rest/api/2/project/'+id+'/role/10100'
response = requests.get(url,
auth = ('*', '*'))
data = response.json()
for actor in data["actors"]:
if actor["displayName"] and actor["displayName"] == "group2":
projectNames.append(actor["id"])
projectNames = set()
for id in ids:
url = 'http://localhost:8080/rest/api/2/project/'+id+'/role/10100'
response = requests.get(url,
auth = ('*', '*'))
data = response.json()
group2_actors = [actor['id'] for actor in data['actors']
if actor['displayName'] == 'group2']
if len(group2_actors) > 0:
projectNames.update(group2_actors)
projectNames is a set of unique actor ids with displayName == group2.
some_json = {}
result = [actor['id'] for actor in some_json['actors'] if actor['name']=='group2']
so result for that second json will be [10207]
I have a script which takes data in a SQL Server database and parses it into a key:value pair JSON. I would like to give the three items in the JSON dictionary; one key, such as "ServiceRequest" at the highest level. So that my output would read as:
{
"ServiceRequest": [
{
"SRNUMBER": "1-3580171",
"FirstName": "Myla",
"LastName": "Threeoneone"
}
]
}
Program:
import pyodbc
import json
import collections
import requests
import urllib
connstr = 'DRIVER={SQL Server};SERVER=ServerName;DATABASE=DataBase; UID=UID;PWD=PWD'
conn = pyodbc.connect(connstr)
cursor = conn.cursor()
cursor.execute("""
SELECT SRNUMBER, FirstName, LastName
FROM MYLA311 """)
rows = cursor.fetchall()
# Convert query to row arrays
rowarray_list = []
for row in rows:
t = (row.SRNUMBER)
rowarray_list.append(t)
j = json.dumps(rowarray_list)
rowarrays_file = 'student_rowarrays.js'
f = open(rowarrays_file,'w')
# Convert query to objects of key-value pairs
objects_list = []
for row in rows:
d = collections.OrderedDict()
d['SRNUMBER']= row.SRNUMBER
d['FirstName']= row.FirstName
d['LastName']= row.LastName
objects_list.append(d)
j = json.dumps(objects_list)
objects_file = 'C:\Users\Administrator\Desktop\JSONOutput.txt'
f = open(objects_file,'w')
print >> f, j
print j
conn.close()
Actual Output:
[
{
"SRNUMBER": "1-3580171",
"FirstName": "Myla",
"LastName": "Threeoneone"
}
]
JSON and Python dictionaries are very similar.
Your desired output is already valid Python:
{
"ServiceRequest": [
{
"SRNUMBER": "1-3580171",
"FirstName": "Myla",
"LastName": "Threeoneone"
}
]
}
So all you need to do, is wrap your objects list in a Python dictionary:
output = {
'ServiceRequest': object_list
}
# then dump it
json.dumps(output)
# ...