I am receiving a bunch of images to the flask app via the client file.
client.py
# Generate the parallel requests based on the ThreadPool Executor
from concurrent.futures import ThreadPoolExecutor as PoolExecutor
import sys
import time
import glob
import requests
import threading
import uuid
import base64
import json
import os
#send http request
def call_object_detection_service(image):
try:
url = str(sys.argv[2])
data = {}
#generate uuid for image
id = uuid.uuid5(uuid.NAMESPACE_OID, image)
# Encode image into base64 string
with open (image, 'rb') as image_file:
data['image'] = base64.b64encode(image_file.read()).decode('utf-8')
data['id'] = str(id)
headers = {'Content-Type': 'application/json'}
response = requests.post(url, json= json.dumps(data), headers = headers)
if response.ok:
output = "Thread : {}, input image: {}, output:{}".format(threading.current_thread().getName(),
image, response.text)
print(output)
else:
print ("Error, response status:{}".format(response))
except Exception as e:
print("Exception in webservice call: {}".format(e))
# gets list of all images path from the input folder
def get_images_to_be_processed(input_folder):
images = []
for image_file in glob.iglob(input_folder + "*.jpg"):
images.append(image_file)
return images
def main():
## provide argumetns-> input folder, url, number of wrokers
if len(sys.argv) != 4:
raise ValueError("Arguments list is wrong. Please use the following format: {} {} {} {}".
format("python iWebLens_client.py", "<input_folder>", "<URL>", "<number_of_workers>"))
input_folder = os.path.join(sys.argv[1], "")
images = get_images_to_be_processed(input_folder)
num_images = len(images)
num_workers = int(sys.argv[3])
start_time = time.time()
#craete a worker thread to invoke the requests in parallel
with PoolExecutor(max_workers=num_workers) as executor:
for _ in executor.map(call_object_detection_service, images):
pass
#elapsed_time = time.time() - start_time
#print("Total time spent: {} average response time: {}".format(elapsed_time, elapsed_time/num_images))
if __name__ == "__main__":
main()
I decode them like so
Flask App
app = Flask(__name__)
c = 1
#app.route('/api/object_detection', methods = ['POST'])
def main():
global c
try:
data = request.get_json(force=True)
uid = data.get('id')
image = data.get('image')
print(image)
im = base64.decodebytes(image)
with open("image{}".format(c), 'wb') as f:
f.write(im)
c += 1
for l in range(128):
img = cv2.imread("image{}".format(l), cv2.IMREAD_ANYCOLOR);
# load the neural net. Should be local to this method as its multi-threaded endpoint
nets = load_model(CFG, Weights)
s = do_prediction(img, nets, Lables)
return jsonify(s)
except Exception as e:
print(e)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True, threaded=True)
This creates the image files with different sizes but I cannot view them in image viewer. The files being recieved are jpg files. Ignoring that, I went ahead with the processing and I get
TypeError: The view function for 'main' did not return a valid response. The function either returned None or ended without a return statement.
Incorrect padding
Incorrect padding
[INFO] loading YOLO from disk...
'NoneType' object has no attribute 'shape'
Images are being sent like this.
python iWebLens_client.py inputfolder/ http://192.168.29.75:5000/api/object_detection 4
The images are being received like this.
b'"{\\"image\\": \\"/9j/4AAQSkZJRgABAQEASABIAAD/4gxYSUNDX1BST0ZJTEUAAQEAAAxITGlubwIQAABtbnRyUkdCIFhZWiAHzgACAAkABgAxAABhY3NwTVNGVAAAAABJRUMgc1JHQgAAAAAAAAAAAAAAAAAA......fiU05tQopHNf//Z\\", \\"id\\": \\"e3ad9809-b84c-57f1-bd03-a54e25c59bcc\\"}"'
I am thinking I need to decode('utf-8') this, but don't know how.
Currently, you are double-coding the data on the client side. Within requests, the argument passed is already converted to JSON.
Just pass the dict on as a json parameter.
def call_object_detection_service(image):
try:
url = str(sys.argv[2])
data = {}
#generate uuid for image
id = uuid.uuid5(uuid.NAMESPACE_OID, image)
# Encode image into base64 string
with open (image, 'rb') as image_file:
data['image'] = base64.b64encode(image_file.read()).decode('utf-8')
data['id'] = str(id)
headers = {'Content-Type': 'application/json'}
# HERE IS THE CHANGE !!!
response = requests.post(url, json=data, headers=headers)
if response.ok:
output = "Thread : {}, input image: {}, output:{}".format(
threading.current_thread().getName(),
image,
response.text
)
print(output)
else:
print ("Error, response status:{}".format(response))
except Exception as e:
print("Exception in webservice call: {}".format(e))
The data can now be received on the server as JSON and extracted into a dict.
#app.route('/api/object_detection', methods=['POST'])
def main():
data = request.get_json(force=True)
uid = data.get('id')
image = data.get('image')
# ... decode the base64 data here ...
return jsonify(message='done')
Related
I'm creating a backend function so that users of App A can import their details to App B. The flow is like this:
User uploads a zip file on the website. This zip contains csv files.
This zip file flows into S3.
Once in S3, it triggers a Lambda function.
The Lambda function then picks the zip file and starts processing the data inside the csv files.
I've completed Step 1,2 and 3. But in 4, the Lambda function is not able to read/process the file.
The python file works fine on my local device, so I think the issue is that it is not able to "get" the object from S3 correctly and so the read_zip doesn't work.
Relevant code below:
import <relevant libs>
s3Client = boto3.client('s3')
def lambda_handler(event,context):
bucket = event['Records'][0]['s3']['bucket']['name']
filename = event['Records'][0]['s3']['object']['key']
#tried printing these, displaying correctly
#response = s3Client.get_object(Bucket=bucket, Key=filename)
usefile = 'https://' + bucket + '.s3.ap-south-1.amazonaws.com/' + filename
print(usefile)
#printing correct filename
def read_csv(file):
to_return = []
reader = csv.DictReader(TextIOWrapper(file, 'utf-8'))
for row in reader:
to_return.append(row)
return to_return
def read_zip(usefile):
with ZipFile(usefile, 'r') as APPA_file:
with APPA_file.open("file1.csv", mode='r') as f:
file1 = read_csv(f)
with APPA_file.open("file2.csv", mode='r') as f:
file2 = read_csv(f)
return file1, file2
def get_APPB_url(APPA_uri):
resp = requests.get(APPA_uri)
if resp.status_code != 200:
return None
# extract the APPB url
re_match = re.findall('href="(https://www.X.org/.+/)"', resp.text)
if not re_match:
return None
print(resp.text)
return re_match[0]
def rate_on_APPB(APPB_url, rating):
re_match = re.findall('X.org/(.+)/', APPB_url)
if not re_match:
return None
APPB_id = re_match[0]
req_body = {
"query": <query used>,
"operationName": "<xyz>",
"variables": <variables>
}
headers = {
"content-type": "application/json",
"x-hasura-admin-secret": "XXX"
}
resp = requests.post("XXX", json=req_body, headers=headers)
if resp.status_code != 200:
raise ValueError(f"Hasura query failed. Code: {resp.status_code}")
else: print(APPB_id)
json_resp = resp.json()
if 'errors' in json_resp and len(json_resp['errors']) > 0:
first_error_msg = json_resp['errors'][0]['message']
if 'Authentication' in first_error_msg:
print(f"Failed to authenticate with cookie")
exit(1)
else:
raise ValueError(first_error_msg)
def APPA_to_APPB(APPA_dict):
APPB_url = get_APPB_url(APPA_dict['APPA URI'])
if APPB_url is None:
raise ValueError("Cannot find APPB title")
rate_on_APPB(APPB_url, int(float(APPA_dict['Rating']) * 2))
def main():
file1, file2 = read_zip(usefile)
success = []
errors = []
with tqdm(total=len(file1)) as pbar:
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_url = {
executor.submit(APPA_to_APPB, APPA_dict): APPA_dict for APPA_dict in file1
}
try:
for future in concurrent.futures.as_completed(future_to_url):
APPA_dict = future_to_url[future]
pbar.update(1)
try:
success.append(future.result())
except Exception as e:
errors.append({"APPA_dict": APPA_dict, "error": e})
except KeyboardInterrupt:
executor._threads.clear()
concurrent.futures.thread._threads_queues.clear()
print(f"Successfully rated: {len(success)} ")
print(f"{len(errors)} Errors")
for error in errors:
print(f"\t{error['APPA_dict']['Name']} ({error['APPA_dict']['Year']}): {error['error']}")
if __name__ == '__main__':
main()
CloudWatch log basically says Event started and ended successfully, nothing else. I was able to verify that the trigger was working fine by printing the file name and it appears in the log. But that's about it.
i have a problem on my code.
I need to receive some packages post from antennas and receive the same frame from camera, tryng to tyng up the packgae (json) to the frame.
I have an exception like this:
stop() cannot be called before start()
and the code looks like:
import pyrealsense2 as rs
import cv2
import numpy as np
import datetime
from app import app
from flask import request, jsonify
import io, json
i = 0
resp = None
pipeline = None
#app.route("/file-upload", methods=["POST"])
def upload_file():
global pipeline
#print("Create pipeline")
#pipeline = create_pipeline()
pipeline = rs.pipeline()
cfg = rs.config()
cfg.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 45)
pipeline.start(cfg)
string = request.data
string = string.decode('utf-8')
frames = pipeline.wait_for_frames()
color_frame = frames.get_color_frame()
Current_Time = datetime.datetime.now() # .time() or .date()
timestampStr = Current_Time.strftime("%d-%b-%Y -- %H-%M-%S.%f")
if not color_frame:
print("Color frame non presente")
color_image = np.asanyarray(color_frame.get_data())
if string is not None:
payload = json.loads(string)
else:
resp = jsonify({'message': "Il payload è vuoto"})
resp.status_code = 404
try:
with io.open("./DatasetProva/Json/" + timestampStr+'.json', 'w', encoding='utf8') as outfile:
# Scriviamo leimmagini sul disco nel path
str_ = json.dumps(payload,indent=4, separators=(',', ': '), ensure_ascii=False)
outfile.write(str_)
#i+=1
resp = jsonify({'message': "E' stato salvato il json"})
resp.status_code = 201
cv2.imwrite("./DatasetProva/Immagini/" + timestampStr + ".jpg", color_image)
pipeline.stop()
return resp
except Exception as ex:
print("Errore sulla scrittura sul disco ", ex)
#i-=1
return resp
if __name__ == "__main__":
app.run(debug=True, host ="192.168.1.191", port="8080")
I receive one package from the antenna every 200/300 milliseconds, is it possible that a new pipeline starts before the old one stops? And how could I solve the problem?
I am using an API gateway and AWS Lamdba function as Proxy to my company's API (C# Web API 2.0)
The Lambda function in written in Python 2.7 and I am using Pyhton's urllib2 to pass the http request to the API.
I encounterd a strange issue When I am sending a json body containing hebrew characters.
The Json is being cut in the middle. I am making sure that the Json sent from the Lambda is complete, but the json body received in the Lambda is being turcated somewhere along the way.
This is the Lambda function:
from __future__ import print_function
import json
import urllib2
import HTMLParser
base = "http://xxxxxx/api"
hparser = HTMLParser.HTMLParser()
def lambda_handler(event, context):
print("Got event\n" + json.dumps(event, indent=2))
# Form URL
url = base + event['queryStringParameters']['rmt']
print('URL = %s' % url)
req = urllib2.Request(url)
if 'body' in event:
if event['body']:
print('BODY = %s' % json.dumps(event['body'], ensure_ascii=False, encoding='utf8') )
req.add_data(json.dumps(event['body'], ensure_ascii=False, encoding='utf8'))
# Copy only some headers
if 'headers' in event:
if event['headers']:
copy_headers = ('Accept', 'Content-Type', 'content-type')
for h in copy_headers:
if h in event['headers']:
print('header added = %s' % event['headers'][h])
req.add_header(h, event['headers'][h])
# Build response
out = {}
headersjsonstr = ('Access-Control-Allow-Origin', '')
response_header = {}
try:
print('Trying here...')
resp = urllib2.urlopen(req)
out['statusCode'] = resp.getcode()
out['body'] = resp.read()
for head in resp.info().headers:
keyval = head.split(':')
if any(keyval[0] in h for h in headersjsonstr):
response_header[keyval[0]] = keyval[1].replace('\r','').replace('\n','').strip()
print('response_header = %s' % response_header )
out['headers'] = response_header
print('status = %s' % out['statusCode'] )
except urllib2.HTTPError as e:
out['statusCode'] = e.getcode()
out['body'] = e.read()
out['headers'] = e.headers
print('status = %s' % out['statusCode'] )
return out
This is the Post request raw body Json
{"company":"שלום","guests":[{"fullname":"אבי","carno":"67"}],"fromdate":"2018-10-10","todate":"2018-10-10","fromtime":"07:31","totime":"07:31","comments":null,"Employee":{"UserId":"ink1445"}}
And this is what I am getting on the API:
"{\"company\":\"שלום\",\"guests\":[{\"fullname\":\"אבי\",\"carno\":\"67\"}],\"fromdate\":\"2018-10-10\",\"todate\":\"2018-10-10\",\"fromtime\":\"07:31\",\"totime\":\"07:31\",\"comments\":null,\"Employee\":{\"UserId\":\"ink1
Again, when I am sending only English letters everything is fine.
Please help!
Thanks
Very likely your json buffer is too small, and you are getting overflow truncation.
The size was probably set assuming ASCII or utf-8 encoding, and your unicode characters are wider (consume more bytes).
Depending on what json package you are using, you may be able to set an option for unicode or you might need to adjust the buffer size manually.
I am creating an API to receive and process images. I have to receive the image in bytearray format. The following is my code to post:
Approach 1
Posting the image to api
with open("test.jpg", "rb") as imageFile:
f = imageFile.read()
b = bytearray(f)
url = 'http://127.0.0.1:5000/lastoneweek'
headers = {'Content-Type': 'application/octet-stream'}
res = requests.get(url, data=b, headers=headers)
##print received json response
print(res.text)
My API: Receiving image at api
#app.route('/lastoneweek', methods=['GET'])
def get():
img=request.files['data']
image = Image.open(io.BytesIO(img))
image=cv2.imread(image)
##do all image processing and return json response
Within my api I have tried, request.get['data'] request.params['data']....I am getting object has no attribute error.
I tried passing the bytearray to json along with width and height of the image like:
Approach 2:Posting image to api
data = '{"IMAGE":b,"WIDTH":16.5,"HEIGHT":20.5}'
url = 'http://127.0.0.1:5000/lastoneweek'
headers = {'Content-Type': 'application/json'}
res = requests.get(url, data=data, headers=headers)
and changed my get function at the API as
Receive image at api
#app.route('/lastoneweek', methods=['GET'])
def get():
data=request.get_json()
w = data['WIDTH']
h = data['HEIGHT']
but have received the following error for example:
TypeError: 'LocalProxy' does not have the buffer interface
server.py file:
from flask import Flask
from flask import request
import cv2
from PIL import Image
import io
import requests
import numpy as np
app = Flask(__name__)
#app.route('/lastoneweek', methods=['POST'])
def get():
print(request.files['image_data'])
img = request.files['image_data']
image = cv2.imread(img.filename)
rows, cols, channels = image.shape
M = cv2.getRotationMatrix2D((cols/2, rows/2), 90, 1)
dst = cv2.warpAffine(image, M, (cols, rows))
cv2.imwrite('output.png', dst)
##do all image processing and return json response
return 'image: success'
if __name__ == '__main__':
try:
app.run()
except Exception as e:
print(e)
with client.py file as:
import requests
with open("test.png", "rb") as imageFile:
# f = imageFile.read()
# b = bytearray(f)
url = 'http://127.0.0.1:5000/lastoneweek'
headers = {'Content-Type': 'application/octet-stream'}
try:
response = requests.post(url, files=[('image_data',('test.png', imageFile, 'image/png'))])
print(response.status_code)
print(response.json())
except Exception as e:
print(e)
# res = requests.put(url, files={'image': imageFile}, headers=headers)
# res = requests.get(url, data={'image': imageFile}, headers=headers)
##print received json response
print(response.text)
I referred this link: http://docs.python-requests.org/en/master/user/advanced/#post-multiple-multipart-encoded-files
This solves the first issue.
The line image = Image.open(io.BytesIO(img)) is wrong since img is a <class 'werkzeug.datastructures.FileStorage'> which should not be passed to io.BytesIO, since it takes bytes-like object as mentioned here: https://docs.python.org/3/library/io.html#io.BytesIO, and explanation of bytes-like object here: https://docs.python.org/3/glossary.html#term-bytes-like-object
So, instead of doing this. Passing filename directly to cv2.imread(img.filename) solved the issue.
This question already has an answer here:
How to send multiple http requests python
(1 answer)
Closed 6 years ago.
I created the following script to download images from an API endpoint which works as intended. Thing is that it is rather slow as all the requests have to wait on each other. What is the correct way to make it possible to still have the steps synchronously for each item I want to fetch, but make it parallel for each individual item. This from an online service called
servicem8
So what I hope to achieve is:
fetch all possible job ids => keep name/and other info
fetch name of the customer
fetch each attachment of a job
These three steps should be done for each job. So I could make things parallel for each job as they do not have to wait on each other.
Update:
Problem I do not understand is how can you make sure that you bundle for example the three calls per item in one call as its only per item that I can do things in parallel so for example when I want to
fetch item( fetch name => fetch description => fetch id)
so its the fetch item I want to make parallel?
The current code I have is working but rather slow:
import requests
import dateutil.parser
import shutil
import os
user = "test#test.com"
passw = "test"
print("Read json")
url = "https://api.servicem8.com/api_1.0/job.json"
r = requests.get(url, auth=(user, passw))
print("finished reading jobs.json file")
scheduled_jobs = []
if r.status_code == 200:
for item in r.json():
scheduled_date = item['job_is_scheduled_until_stamp']
try:
parsed_date = dateutil.parser.parse(scheduled_date)
if parsed_date.year == 2016:
if parsed_date.month == 10:
if parsed_date.day == 10:
url_customer = "https://api.servicem8.com/api_1.0/Company/{}.json".format(item[
'company_uuid'])
c = requests.get(url_customer, auth=(user, passw))
cus_name = c.json()['name']
scheduled_jobs.append(
[item['uuid'], item['generated_job_id'], cus_name])
except ValueError:
pass
for job in scheduled_jobs:
print("fetch for job {}".format(job))
url = "https://api.servicem8.com/api_1.0/Attachment.json?%24filter=related_object_uuid%20eq%20{}".format(job[
0])
r = requests.get(url, auth=(user, passw))
if r.json() == []:
pass
for attachment in r.json():
if attachment['active'] == 1 and attachment['file_type'] != '.pdf':
print("fetch for attachment {}".format(attachment))
url_staff = "https://api.servicem8.com/api_1.0/Staff.json?%24filter=uuid%20eq%20{}".format(
attachment['created_by_staff_uuid'])
s = requests.get(url_staff, auth=(user, passw))
for staff in s.json():
tech = "{}_{}".format(staff['first'], staff['last'])
url = "https://api.servicem8.com/api_1.0/Attachment/{}.file".format(attachment[
'uuid'])
r = requests.get(url, auth=(user, passw), stream=True)
if r.status_code == 200:
creation_date = dateutil.parser.parse(
attachment['timestamp']).strftime("%d.%m.%y")
if not os.path.exists(os.getcwd() + "/{}/{}".format(job[2], job[1])):
os.makedirs(os.getcwd() + "/{}/{}".format(job[2], job[1]))
path = os.getcwd() + "/{}/{}/SC -O {} {}{}".format(
job[2], job[1], creation_date, tech.upper(), attachment['file_type'])
print("writing file to path {}".format(path))
with open(path, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
else:
print(r.text)
Update [14/10]
I updated the code in the following way with some hints given. Thanks a lot for that. Only thing I could optimize I guess is the attachment downloading but it is working fine now. Funny thing I learned is that you cannot create a CON folder on a windows machine :-) did not know that.
I use pandas as well just to try to avoid some loops in my list of dicts but not sure if I am already most performant. Longest is actually reading in the full json files. I fully read them in as I could not find an API way of just telling the api, return me only the jobs from september 2016. The api query function seems to work on eq/lt/ht.
import requests
import dateutil.parser
import shutil
import os
import pandas as pd
user = ""
passw = ""
FOLDER = os.getcwd()
headers = {"Accept-Encoding": "gzip, deflate"}
import grequests
urls = [
'https://api.servicem8.com/api_1.0/job.json',
'https://api.servicem8.com/api_1.0/Attachment.json',
'https://api.servicem8.com/api_1.0/Staff.json',
'https://api.servicem8.com/api_1.0/Company.json'
]
#Create a set of unsent Requests:
print("Read json files")
rs = (grequests.get(u, auth=(user, passw), headers=headers) for u in urls)
#Send them all at the same time:
jobs,attachments,staffs,companies = grequests.map(rs)
#create dataframes
df_jobs = pd.DataFrame(jobs.json())
df_attachments = pd.DataFrame(attachments.json())
df_staffs = pd.DataFrame(staffs.json())
df_companies = pd.DataFrame(companies.json())
#url_customer = "https://api.servicem8.com/api_1.0/Company/{}.json".format(item['company_uuid'])
#c = requests.get(url_customer, auth=(user, passw))
#url = "https://api.servicem8.com/api_1.0/job.json"
#jobs = requests.get(url, auth=(user, passw), headers=headers)
#print("Reading attachments json")
#url = "https://api.servicem8.com/api_1.0/Attachment.json"
#attachments = requests.get(url, auth=(user, passw), headers=headers)
#print("Reading staff.json")
#url_staff = "https://api.servicem8.com/api_1.0/Staff.json"
#staffs = requests.get(url_staff, auth=(user, passw))
scheduled_jobs = []
if jobs.status_code == 200:
print("finished reading json file")
for job in jobs.json():
scheduled_date = job['job_is_scheduled_until_stamp']
try:
parsed_date = dateutil.parser.parse(scheduled_date)
if parsed_date.year == 2016:
if parsed_date.month == 9:
cus_name = df_companies[df_companies.uuid == job['company_uuid']].iloc[0]['name'].upper()
cus_name = cus_name.replace('/', '')
scheduled_jobs.append([job['uuid'], job['generated_job_id'], cus_name])
except ValueError:
pass
print("{} jobs to fetch".format(len(scheduled_jobs)))
for job in scheduled_jobs:
print("fetch for job attachments {}".format(job))
#url = "https://api.servicem8.com/api_1.0/Attachment.json?%24filter=related_object_uuid%20eq%20{}".format(job[0])
if attachments == []:
pass
for attachment in attachments.json():
if attachment['related_object_uuid'] == job[0]:
if attachment['active'] == 1 and attachment['file_type'] != '.pdf' and attachment['attachment_source'] != 'INVOICE_SIGNOFF':
for staff in staffs.json():
if staff['uuid'] == attachment['created_by_staff_uuid']:
tech = "{}_{}".format(
staff['first'].split()[-1].strip(), staff['last'])
creation_timestamp = dateutil.parser.parse(
attachment['timestamp'])
creation_date = creation_timestamp.strftime("%d.%m.%y")
creation_time = creation_timestamp.strftime("%H_%M_%S")
path = FOLDER + "/{}/{}/SC_-O_D{}_T{}_{}{}".format(
job[2], job[1], creation_date, creation_time, tech.upper(), attachment['file_type'])
# fetch attachment
if not os.path.isfile(path):
url = "https://api.servicem8.com/api_1.0/Attachment/{}.file".format(attachment[
'uuid'])
r = requests.get(url, auth=(user, passw), stream = True)
if r.status_code == 200:
if not os.path.exists(FOLDER + "/{}/{}".format(job[2], job[1])):
os.makedirs(
FOLDER + "/{}/{}".format(job[2], job[1]))
print("writing file to path {}".format(path))
with open(path, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
else:
print("file already exists")
else:
print(r.text)
General idea is to use asynchronous url requests and there is a python module named grequests for that-https://github.com/kennethreitz/grequests
From Documentation:
import grequests
urls = [
'http://www.heroku.com',
'http://python-tablib.org',
'http://httpbin.org',
'http://python-requests.org',
'http://fakedomain/',
'http://kennethreitz.com'
]
#Create a set of unsent Requests:
rs = (grequests.get(u) for u in urls)
#Send them all at the same time:
grequests.map(rs)
And the resopnse
[<Response [200]>, <Response [200]>, <Response [200]>, <Response [200]>, None, <Response [200]>]