How to detect memory leak in python code? - python

I'm new to machine learning and python both! I want my code to predict the object which is mostly the car in my case.
When I start the script it runs smoothly but after 20 or so pictures it hangs up my system because of the memory leak.
I want this script to run to my whole database which is way much more than 20 pictures.
I have tried pympler tracker to track which objects are taking the most memory -
This is the code I'm trying to run to predict the objects in the picture:
from imageai.Prediction import ImagePrediction
import os
import urllib.request
import mysql.connector
from pympler.tracker import SummaryTracker
tracker = SummaryTracker()
mydb = mysql.connector.connect(
host="localhost",
user="phpmyadmin",
passwd="anshu",
database="python_test"
)
counter = 0
mycursor = mydb.cursor()
sql = "SELECT id, image_url FROM `used_cars` " \
"WHERE is_processed = '0' AND image_url IS NOT NULL LIMIT 1"
mycursor.execute(sql)
result = mycursor.fetchall()
def dl_img(url, filepath, filename):
fullpath = filepath + filename
urllib.request.urlretrieve(url,fullpath)
for eachfile in result:
id = eachfile[0]
print(id)
filename = "image.jpg"
url = eachfile[1]
filepath = "/home/priyanshu/PycharmProjects/untitled/images/"
print(filename)
print(url)
print(filepath)
dl_img(url, filepath, filename)
execution_path = "/home/priyanshu/PycharmProjects/untitled/images/"
prediction = ImagePrediction()
prediction.setModelTypeAsResNet()
prediction.setModelPath( os.path.join(execution_path, "/home/priyanshu/Downloads/resnet50_weights_tf_dim_ordering_tf_kernels.h 5"))
prediction.loadModel()
predictions, probabilities = prediction.predictImage(os.path.join(execution_path, "image.jpg"), result_count=1)
for eachPrediction, eachProbability in zip(predictions, probabilities):
per = 0.00
label = ""
print(eachPrediction, " : ", eachProbability)
label = eachPrediction
per = eachProbability
print("Label: " + label)
print("Per:" + str(per))
counter = counter + 1
print("Picture Number: " + str(counter))
sql1 = "UPDATE used_cars SET is_processed = '1' WHERE id = '%s'" % id
sql2 = "INSERT into label (used_car_image_id, object_label, percentage) " \
"VALUE ('%s', '%s', '%s') " % (id, label, per)
print("done")
mycursor.execute(sql1)
mycursor.execute(sql2)
mydb.commit()
tracker.print_diff()
This is the result I'm getting from a single picture and it is consuming whole RAM after some iterations. What change should I do to stop the leaking?
seat_belt : 12.617655098438263
Label: seat_belt
Per:12.617655098438263
Picture Number: 1
done
types | objects | total size
<class 'tuple | 130920 | 11.98 MB
<class 'dict | 24002 | 6.82 MB
<class 'list | 56597 | 5.75 MB
<class 'int | 175920 | 4.70 MB
<class 'str | 26047 | 1.92 MB
<class 'set | 740 | 464.38 KB
<class 'tensorflow.python.framework.ops.Tensor | 6515 |
356.29 KB
<class 'tensorflow.python.framework.ops.Operation._InputList |
6097 | 333.43 KB
<class 'tensorflow.python.framework.ops.Operation | 6097 |
333.43 KB
<class 'SwigPyObject | 6098 | 285.84 KB
<class 'tensorflow.python.pywrap_tensorflow_internal.TF_Output |
4656 | 254.62 KB
<class 'tensorflow.python.framework.traceable_stack.TraceableObject | 3309 | 180.96 KB
<class 'tensorflow.python.framework.tensor_shape.Dimension |
1767 | 96.63 KB
<class 'tensorflow.python.framework.tensor_shape.TensorShapeV1 |
1298 | 70.98 KB
<class 'weakref | 807 | 63.05 KB

In this case the model is loading every time in the for loop with image. The model should be outside the for loop, in that case the model won't start every time and won't take the memory which the program is taking.
Code should work this way ->
execution_path = "/home/priyanshu/PycharmProjects/untitled/images/"
prediction = ImagePrediction()
prediction.setModelTypeAsResNet()
prediction.setModelPath( os.path.join(execution_path, "/home/priyanshu/Downloads/resnet50_weights_tf_dim_ordering_tf_kernels.h 5"))
prediction.loadModel()
for eachfile in result:
id = eachfile[0]
print(id)
filename = "image.jpg"
url = eachfile[1]
filepath = "/home/priyanshu/PycharmProjects/untitled/images/"
print(filename)
print(url)
print(filepath)
dl_img(url, filepath, filename)
predictions, probabilities = prediction.predictImage(os.path.join(execution_path, "image.jpg"), result_count=1)
for eachPrediction, eachProbability in zip(predictions, probabilities):
per = 0.00
label = ""
print(eachPrediction, " : ", eachProbability)
label = eachPrediction
per = eachProbability
print("Label: " + label)
print("Per:" + str(per))
counter = counter + 1
print("Picture Number: " + str(counter))
sql1 = "UPDATE used_cars SET is_processed = '1' WHERE id = '%s'" % id
sql2 = "INSERT into label (used_car_image_id, object_label, percentage) " \
"VALUE ('%s', '%s', '%s') " % (id, label, per)
print("done")
mycursor.execute(sql1)
mycursor.execute(sql2)
mydb.commit()
tracker.print_diff()

Related

Adding a newline to the printed data from an imported file

I made the following code, which imports a file and prints its content :
import pandas as pd
file = r"..\test.xlsx"
try:
df = pd.read_excel(file)
#print(df)
except OSError:
print("Impossible to read", file)
test =
df['Date'].map(str) + ' | ' \
+ df['Time'].map(str) + ' | ' \
+ df['Description'].map(str) + ' | ' \
+ '\n'
print(test)
The output is (Edit : I precise that it is printed in an html file) :
20/01 | 17:00 | Text description here1 17/01 | 11:00 | Text
description here2 16/01 | 16:32 | Text description here3 <- In orange
when the the "Urgence" is equal to 3
But what I want is :
20/01 | 17:00 | Text description here1
17/01 | 11:00 | Text description here2
16/01 | 16:32 | Text description here3
I added a new line at the end of my statement + '\n' but it doesn't seem to change anything. How should I proceed ? Thank you.
Edit : I believe that the problem comes from the fact that the entire file is printed, and not line by line so it doesn't add the newline to each line. So I made this code :
test = []
for index, row in df.iterrows():
x = row['Date'] + ' | ' + row['Description'] + '\n'
test.append(x)
print(test)
But the result is the same..
Try this:
test = df['Date'].map(str) + ' | ' +
df['Time'].map(str) + ' | ' +
df['Description'].map(str) + ' | '
list(map(lambda x: print(x), test))
I removed the end of the test string and added the print function.
Let me know if there is any problem :)

Fetch data from multiple result sets - SQL Server to Python

How to union or make the multiple results sets to one, so that I can fetch the data to python dataframe
EXEC sp_MSforeachdb
#command1 = '
IF not exists(select 1 where ''?'' in (''master'',''model'',''msdb'',''tempdb'',''dpa_dpa_sw1'',''dpa_repository''))
EXEC [?].dbo.sp_MSforeachtable
#command1 = ''SELECT TOP 1 db_name() AS DB_Name, ''''&'''' AS Table_name , * from &'', #replacechar=''&'' '
Python Trial
def db_connect ():
server, username, password = 'xx.xx.xx.xx', 'sa', 'xxxx'
conn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};'
'SERVER=' + server
+ ';UID=' + username + ';PWD=' + password)
cursor = conn.cursor()
query = ("EXEC sp_MSforeachdb #command1 = 'IF not exists(select 1 where ''?'' "
"in (''master'',''model'',''msdb'',''tempdb'')) EXEC [?].dbo.sp_MSforeachtable"
" #command1 = ''SELECT TOP 1 db_name() AS DB_Name, ''''&'''' AS Table_name"
" , * from &'', #replacechar=''&'' ';")
df = pd.read_sql(query, conn)
conn.close()
return df
df = db_connect()
Result
| DB_Name | Table_name | id | _NAME | _NUMERICID | _VALUE | _TIMESTAMP | _QUALITY |
|---------|---------------------|----|-----------------|------------|--------|-------------------------|----------|
| aaaa | [dbo].[aaa_exhaust] | 1 | aaaa_vib.00.41 | 0 | 2085 | 2022-08-06 00:30:43.517 | 192 |
In the above case I get only 1st result set, I need results of all result sets in dataframe.
I'm not sure if Pandas can handle multiple result sets like that. You can always use pyodbc's cursor functionality to iterate the multiple result sets, though, and construct a DataFrame like the following...
import pandas as pd
import pyodbc
server, username, password = "127.0.0.1,1433", "sa", "StrongPassw0rd"
connstring = "DRIVER={ODBC Driver 17 for SQL Server};SERVER="+server+";UID="+username+";PWD="+password
conn = pyodbc.connect(connstring)
cursor = conn.cursor().execute("""
select 1 as A;
select 2 as B;
select 3 as C;
""")
buffer = []
while True:
# Get the column names for the current result set
columnNames = [col[0] for col in cursor.description]
# Get the data rows for the current result set
for dataRow in cursor.fetchall():
buffer.append({name: dataRow[index] for index, name in enumerate(columnNames)})
# Iterate to the next result set, otherwise we're "done"
if not cursor.nextset():
break
cursor.close()
conn.close()
# Convert the buffer to a DataFrame and display the result
df = pd.DataFrame(buffer)
print(df.to_string())
Which outputs the following:
A B C
0 1.0 NaN NaN
1 NaN 2.0 NaN
2 NaN NaN 3.0

Webscraper using BeautifulSoup on Python

I have an assignment to make a web scraper using BeautifulSoup.
There are certain functions defined in the code.
How do I pass the bs4.element.ResultSet to another function and extract relevant data in the form of a dictionary as bs4.element.ResultSet is passed as a python list file.
Here is the code I have been able to write so far, comments are included for further information.
def fetchWebsiteData(url_website):
"""Fetches rows of tabular data from given URL of a website with data excluding table headers.
Parameters
----------
url_website : str
URL of a website
Returns
-------
bs4.element.ResultSet
"""
web_page_data = ''
####
req = requests.get(url_website)
soup= BeautifulSoup(req.text,'html.parser')
web_page_data = soup.find_all('tbody')
####
return web_page_data
The other function that I am using:
def fetchVaccineDoses(web_page_data):
"""Fetch the Vaccine Doses available from the Web-page data and provide Options to select the respective Dose.
Parameters
----------
web_page_data : bs4.element.ResultSet
All rows of Tabular data fetched from a website excluding the table headers
Returns
-------
dict
Dictionary with the Doses available and Options to select, with Key as 'Option' and Value as 'Command'
Example
-------
>>> url_website = "https://www.mooc.e-yantra.org/task-spec/fetch-mock-covidpage"
>>> web_page_data = fetchWebsiteData(url_website)
>>> print(fetchVaccineDoses(web_page_data))
{'1': 'Dose 1', '2': 'Dose 2'}
"""
vaccine_doses_dict = {}
####
for dose in web_page_data:
dose = dose.find_all('td', class_="dose_num")
for k in dose:
#print (k.next_element)
if (k.next_element) == 1:
vaccine_doses_dict['1'] = "Dose 1"
else:
vaccine_doses_dict['2'] = "Dose 2"
####
return vaccine_doses_dict
And:
def fetchAgeGroup(web_page_data, dose):
"""Fetch the Age Groups for whom Vaccination is available from the Web-page data for a given Dose
and provide Options to select the respective Age Group.
Parameters
----------
web_page_data : bs4.element.ResultSet
All rows of Tabular data fetched from a website excluding the table headers
dose : str
Dose available for Vaccination and its availability for the Age Groups
Returns
-------
dict
Dictionary with the Age Groups (for whom Vaccination is available for a given Dose) and Options to select,
with Key as 'Option' and Value as 'Command'
Example
-------
>>> url_website = "https://www.mooc.e-yantra.org/task-spec/fetch-mock-covidpage"
>>> web_page_data = fetchWebsiteData(url_website)
>>> print(fetchAgeGroup(web_page_data, '1'))
{'1': '18+', '2': '45+'}
>>> print(fetchAgeGroup(web_page_data, '2'))
{'1': '18+', '2': '45+'}
"""
age_group_dict = {}
####
####
return age_group_dict
PS-> I am pretty new to programming and learning so please mind the bad code.
PPS->This sort of thing is what I want to make
So, I modifed your code a little bit:
Example 01 - Not what Questioner wants! [Without Output]
Example 02 - Customized what Questioner want to have [With Output]
Example 03 - How I do the work [With Output]
Example 01:
from bs4 import BeautifulSoup
import requests
def fetchWebsiteData(url_website):
web_page_data = ''
####
req = requests.get(url_website)
soup= BeautifulSoup(req.text,'html.parser')
####
####Changes here
return soup
def fetchVaccineDoses(web_page_data):
dose_count = 0
vaccine_doses_dict = {}
####
doses = web_page_data.find_all('td', class_="dose_num")
####Changes here too
for dose in doses:
vaccine_doses_dict[dose_count] = "Dose " + dose.text
dose_count = dose_count + 1
####
return vaccine_doses_dict
####Changes here too - Complete your empty function with code -
####I hope it is your output what you want to see.
def fetchAgeGroup(web_page_data, doses):
age_count = 0
####
ages = web_page_data.find_all('td', class_="age")
for age in ages:
doses[age_count] = doses[age_count] + " | Age " + age.text
age_count = age_count + 1
####
return doses
url_website = "https://www.mooc.e-yantra.org/task-spec/fetch-mock-covidpage"
web_page_data = fetchWebsiteData(url_website)
VaccineDoses = fetchVaccineDoses(web_page_data)
Ages = fetchAgeGroup(web_page_data, VaccineDoses)
####Ages element 0 can delete because it contains no numbers
####It is only the table header
del Ages[0]
print(Ages)
Example 02:
So, here the result code of your picture example.
from bs4 import BeautifulSoup
import requests
def fetchWebsiteData(url_website):
web_page_data = ''
####
req = requests.get(url_website)
soup= BeautifulSoup(req.text,'html.parser')
####
return soup
def fetchVaccineDoses(web_page_data):
dose_count = 0
vaccine_doses_dict = {}
####
doses = web_page_data.find_all('td', class_="dose_num")
for dose in doses:
if "Dose " + dose.text in vaccine_doses_dict.keys():
vaccine_doses_dict["Dose " + dose.text] += 1
else:
vaccine_doses_dict["Dose " + dose.text] = 1
dose_count = dose_count + 1
####
#### Table header remove
del vaccine_doses_dict["Dose Dose"]
return vaccine_doses_dict
def fetchAgeGroup(web_page_data, doseNum):
age_dict = {}
age_count = 0
####
lines = web_page_data.find_all('tr')
for line in lines:
if age_count == 0:
##Ignore table header
age_count += 1
continue
if fetchOnlyNumber(line.find("td", class_="dose_num").text) == doseNum:
current_age = line.find("td", class_="age").text
if current_age in age_dict.keys():
age_dict[current_age] += 1
else:
age_dict[current_age] = 1
####
return age_dict
def fetchOnlyNumber(inputStr):
number = 0
for sChar in inputStr:
if sChar.isdigit():
number = (number * 10) + int(sChar)
else:
return -1
return number
def fetchUserInput():
### Integer only
while True:
print("Choose: ", end="")
usrInput = fetchOnlyNumber( input() )
if usrInput > 0:
return usrInput
else:
if usrInput == 0:
###Exit
return 0
else:
print("Wrong input! Only numbers are allowed!")
url_website = "https://www.mooc.e-yantra.org/task-spec/fetch-mock-covidpage"
web_page_data = fetchWebsiteData(url_website)
VaccineDoses = fetchVaccineDoses(web_page_data)
indexVacc = 1
print(">>> Select the Dose of Vaccination:")
for VaccineDose, amount in VaccineDoses.items():
print("{0} :- {1} [Amount: {2}]".format(indexVacc, VaccineDose, amount))
indexVacc += 1
print("0 :- Exit")
doseNum = fetchUserInput()
if doseNum == 0:
exit(0)
print("<<< Dose Selected: {0}".format(doseNum))
print(">>> Select the Age Group:")
indexAge = 0
Ages = fetchAgeGroup(web_page_data, doseNum)
for age, amount in Ages.items():
print("{0} :- {1} [Amount: {2}]".format(indexAge, age, amount))
indexAge += 1
Output of Example 02:
>>> Select the Dose of Vaccination:
1 :- Dose 1 [Amount: 33]
2 :- Dose 2 [Amount: 42]
0 :- Exit
Choose: 2
<<< Dose Selected: 2
>>> Select the Age Group:
0 :- 45+ [Amount: 29]
1 :- 18+ [Amount: 13]
Example 03:
from bs4 import BeautifulSoup
import requests
def fetchWebsiteData(url_website):
web_page_data = ''
####
req = requests.get(url_website)
soup= BeautifulSoup(req.text,'html.parser')
####
return soup
def fetchAllPossibleData(web_page_data):
data_collaction_dict = {}
doses = {}
data_index = 0
### FOR LOOP BEGINS
for tr in web_page_data.find_all('tr'):
if data_index == 0:
#table header ignore
data_index += 1
continue
hospital_name = tr.find("td", class_="hospital_name").text
state_name = tr.find("td", class_="state_name").text
district_name = tr.find("td", class_="district_name").text
vaccine_name = tr.find("td", class_="vaccine_name").text
dose_num = tr.find("td", class_="dose_num").text
age = tr.find("td", class_="age").text
### dict = { Key: Value, Key: Value, ... }
data_collaction_dict[data_index] = {
"Hospital": hospital_name,
"State": state_name,
"District": district_name,
"Vaccine": vaccine_name,
"Dose": dose_num,
"Age": age
}
### Count "Dose 1" and "Dose 2"
if dose_num in doses:
doses[dose_num] += 1
else:
doses[dose_num] = 1
data_index += 1
### FOR LOOP ENDS
data_collaction_dict["AmountDose"] = doses
return data_collaction_dict
def fetchOnlyNumber(inputStr):
number = 0
for sChar in inputStr:
if sChar.isdigit():
number = (number * 10) + int(sChar)
else:
return -1
return number
def fetchUserInput():
### Integer only
while True:
print("Choose: ", end="")
usrInput = fetchOnlyNumber( input() )
if usrInput > 0:
return usrInput
else:
if usrInput == 0:
###Exit
return 0
else:
print("Wrong input! Only numbers are allowed!")
url_website = "https://www.mooc.e-yantra.org/task-spec/fetch-mock-covidpage"
web_page_data = fetchWebsiteData(url_website)
whole_data = fetchAllPossibleData(web_page_data)
dose_index = 1
print(">>> Select the Dose of Vaccination:")
for dose, amount in whole_data["AmountDose"].items():
print("{0} :- Dose {1} [Amount: {2}]".format(dose_index, dose, amount))
dose_index += 1
print("0 :- Exit")
doseNum = fetchUserInput()
if doseNum == 0:
exit(0)
print("<<< Dose Selected: {0}".format(doseNum))
print(">>> Select the Age Group:")
for key in whole_data.keys():
if key == "AmountDose":
continue
if int(whole_data[key]["Dose"]) == doseNum:
print( "Hospital: {0} | Vaccine: {1} | Dose: {2} | Age: {3}".format(
whole_data[key]["Hospital"],
whole_data[key]["Vaccine"],
whole_data[key]["Dose"],
whole_data[key]["Age"]))
print('-' * 100)
Output of Example 03:
>>> Select the Dose of Vaccination:
1 :- Dose 1 [Amount: 33]
2 :- Dose 2 [Amount: 42]
0 :- Exit
Choose: 1
<<< Dose Selected: 1
>>> Select the Age Group:
Hospital: Apollo Hospital | Vaccine: Covaxin | Dose: 1 | Age: 45+
--------------------------------------------------------------------------------
Hospital: Springedge Care | Vaccine: Covaxin | Dose: 1 | Age: 18+
--------------------------------------------------------------------------------
Hospital: West Valley Medical Center | Vaccine: Covaxin | Dose: 1 | Age: 45+
--------------------------------------------------------------------------------
Hospital: Zenlife Clinic | Vaccine: Covaxin | Dose: 1 | Age: 18+
--------------------------------------------------------------------------------
Hospital: Family Wellness Center | Vaccine: Covishield | Dose: 1 | Age: 18+
--------------------------------------------------------------------------------
Hospital: Tranquil Valley Hospital Center | Vaccine: Covaxin | Dose: 1 | Age: 45+
--------------------------------------------------------------------------------
Hospital: SevenHills | Vaccine: Covishield | Dose: 1 | Age: 18+
--------------------------------------------------------------------------------
.... more output with python script Exapmle 03 - but I think above is enough....

Unicode error python

Here is gist of the problem.
I am trying get data from a REST API call and storing them in a database.
Then I running few queries to find out TOP 3 users. I could not pack all the list values that I am getting from MySQL to a JSON file.
I am unable to get past the following issue.
File "/Users/id1/Downloads/user1.py", line 58, in
get_last_three_installed_user
results.append(dict(zip(columns, row)))
TypeError: 'unicode' object is not callable
This is the output of a SQL query
+----------------+--------+-------------+------------+-----------------+
| name | gender | nationality | registered | registered_date |
+----------------+--------+-------------+------------+-----------------+
| mélissa robin | female | FR | 1437761753 | 2015-07-24 |
| agathe fabre | female | FR | 1437002837 | 2015-07-15 |
| soline morin | female | FR | 1436138376 | 2015-07-05 |
+----------------+--------+-------------+------------+-----------------+
If I try str(name) I am getting following error:
name = str(json_dict["results"][result]["user"]["name"]["first"]) +"
"+ str(json_dict["results"][result]["user"]["name"]["last"])
UnicodeEncodeError: 'ascii' codec can't encode character u'\xe4' in
position 1: ordinal not in range(128)
Here is my code:
def get_last_three_installed_user(file_type):
count_sql = "select name,gender,nationality,registered,DATE_FORMAT(from_unixtime(registered), '%Y-%m-%d') registered_date from install_user order by from_unixtime(registered) desc limit 3 "
curs.execute(count_sql)
columns = [column[0] for column in curs.description]
results = []
if file_type == 'csv':
fp = open('user_list.csv', 'w')
csvFile = csv.writer(fp)
rows = curs.fetchall()
csvFile.writerows(rows)
else:
with open('file_count.json', 'w') as outfile:
for row in curs.fetchall():
results.append(dict(zip(columns, row)))
print results
output = {"TableData": results}
json.dump(output, outfile, sort_keys = True, indent = 4, ensure_ascii=False)
This code pretty much took care of it.
def get_last_three_installed_user(file_type):
count_sql = "select name,gender,nationality,registered,DATE_FORMAT(from_unixtime(registered), '%Y-%m-%d') registered_date from install_user order by from_unixtime(registered) desc limit 1,3 "
curs.execute(count_sql)
results = []
dict1 ={}
if file_type == 'csv':
fp = open('user_list.csv', 'w')
csvFile = csv.writer(fp)
rows = curs.fetchall()
csvFile.writerows(rows)
else:
with open('file_count.json', 'w') as outfile:
for row in curs.fetchall():
for idx, col in enumerate(curs.description):
dict1[col[0]] = row[idx]
results.append(dict1)
output = {"TableData": results}
json.dump(output, outfile, sort_keys = True, indent = 4, ensure_ascii=False)

Python clipping database returned variables

I'm busy with a script to get some data from the database and arrange it for output later and I would like to know how to clip the variables being returned
I have my script
# CONNECT TO DATABASE
#=====================================================================
varPgSQL = connectToDatabase()
# PREPARE SQL STATEMENTS
#=====================================================================
cur_users = varPgSQL.cursor()
cur_users.execute("prepare cur_users as " +
"select * from users " +
"where usr_idno > $1")
cur_details = varPgSQL.cursor()
cur_details.execute("prepare cur_details as " +
"select * from details " +
"where dtl_usr_idno = $1")
# EXECUTE SQL STATEMENTS
#=====================================================================
la_results = []
cur_users.execute("execute cur_users(%s)", str(0))
for lr_user in cur_users:
cur_details.execute("execute cur_details(%s)", str(lr_user[0]))
for lr_detail in cur_details:
# STORE RESULTS
la_results.append({
"usr_idno": lr_user[0],
"usr_name": lr_user[1],
"dtl_usr_idno": lr_detail[0],
"dtl_usr_accn": lr_detail[1],
"dtl_usr_accs": lr_detail[2]
})
# CLOSE CONNECTION
#=====================================================================
varPgSQL.close()
# CHECK RESULTS
#=====================================================================
for lr_result in la_results:
print(
" | " +
lr_result["usr_name"] +
" | " +
lr_result["dtl_usr_accn"] +
" | " +
lr_result["dtl_usr_accs"] +
" | "
)
The output of this code though is not clipping the variables, the output is
| mavis | service acc | active |
Which is what I expected because it's the length of the fields in the database but is it possible to clip the variables for output to achieve
| mavis | service acc | active |
If the gaps are being created by whitespace you can use the built in String method strip()
If its is a database artifact you may have to give us more information.

Categories

Resources