These pine script code is from trading view and write by everget. This indicator is Chandelier Exit. I am doing back test by using this indicator so I need convert this pine script code to python in order to get the buy and sell signal.
I am using GBPJPY data from 31/12/2019 to 1/1/2022 on the 4-hour chart, but the buy and sell signals total only 6, so I feel something is wrong with the result. Did I convert the code from Pine Script to Python correctly?
Python code :
# --- Chandelier Exit (everget) ported from Pine Script ---
# BUG FIX: the original port was not recursive.  In Pine, after
# `longStop := ...` the expression `longStop[1]` refers to the previous bar's
# *final* (ratcheted) stop, and `dir` is compared against those previous
# stops.  The old code used shift(1) of the RAW stops and compared close to
# the UPDATED stops, which breaks the ratchet and starves the signal count.
# It also hard-coded the array length 3126.
atr_period = 2   # NOTE(review): Pine default is length=22 -- confirm intent
mult = 4         # NOTE(review): Pine default is mult=3.0 -- confirm intent
n = len(df)      # derive from the data instead of hard-coding 3126

# True Range and ATR.
h, l, c = df['high'], df['low'], df['close']
c_prev = c.shift(1)
tr = np.max([h - l, (c_prev - h).abs(), (c_prev - l).abs()], axis=0)
# NOTE(review): Pine's atr() is Wilder's RMA, not a simple moving average;
# rolling(...).mean() kept from the original port but will diverge slightly.
atr = pd.Series(tr, index=df.index).rolling(atr_period).mean().bfill()
df['atr'] = atr

# Rolling extremes: highest(length) / lowest(length).
df['c1'] = h.rolling(atr_period).max().bfill()
df['c2'] = l.rolling(atr_period).min().bfill()
raw_long = (df['c1'] - atr * mult).to_numpy(dtype=float)
raw_short = (df['c2'] + atr * mult).to_numpy(dtype=float)
close_arr = c.to_numpy(dtype=float)

long_stop = np.empty(n, dtype=float)
short_stop = np.empty(n, dtype=float)
direc = np.empty(n, dtype=int)

for i in range(n):
    # nz(longStop[1], longStop): previous *final* stop, raw stop on bar 0.
    ls_prev = long_stop[i - 1] if i > 0 else raw_long[i]
    ss_prev = short_stop[i - 1] if i > 0 else raw_short[i]
    prev_close = close_arr[i - 1] if i > 0 else np.nan  # NaN > x is False
    # Ratchet: only let the stop move in the trade's favour while price
    # stays on the right side of it.
    long_stop[i] = max(raw_long[i], ls_prev) if prev_close > ls_prev else raw_long[i]
    short_stop[i] = min(raw_short[i], ss_prev) if prev_close < ss_prev else raw_short[i]
    # dir := close > shortStopPrev ? 1 : close < longStopPrev ? -1 : dir
    if close_arr[i] > ss_prev:
        direc[i] = 1
    elif close_arr[i] < ls_prev:
        direc[i] = -1
    else:
        direc[i] = direc[i - 1] if i > 0 else 1  # Pine seeds `var int dir = 1`

df['cel'] = long_stop
df['ces'] = short_stop
df['direc'] = direc
df['direc_prev'] = df['direc'].shift(1)
# Signal fires on the bar where the direction flips.
df['buysignal'] = (df['direc'] == 1) & (df['direc_prev'] == -1)
df['sellsignal'] = (df['direc'] == -1) & (df['direc_prev'] == 1)
Pine script code in trading view:
//#version=4
// Copyright (c) 2019-present, Alex Orekhov (everget)
// Chandelier Exit script may be freely distributed under the terms of the GPL-3.0 license.
study("Chandelier Exit", shorttitle="CE", overlay=true)
length = input(title="ATR Period", type=input.integer, defval=22)
mult = input(title="ATR Multiplier", type=input.float, step=0.1, defval=3.0)
showLabels = input(title="Show Buy/Sell Labels ?", type=input.bool, defval=true)
useClose = input(title="Use Close Price for Extremums ?", type=input.bool, defval=true)
highlightState = input(title="Highlight State ?", type=input.bool, defval=true)
// NOTE: atr() is Wilder's smoothed (RMA) true range, not a simple average.
atr = mult * atr(length)
longStop = (useClose ? highest(close, length) : highest(length)) - atr
// if useClose = false
// longstop = highest(length) - atr (want this)
// else
// longstop = highest(close, length)
// IMPORTANT for ports: after the := reassignment below, longStop[1] refers
// to the previous bar's FINAL (ratcheted) value, not the raw one above.
longStopPrev = nz(longStop[1], longStop)
// nz(x,y) = if previous value is valid then take x else y
longStop := close[1] > longStopPrev ? max(longStop, longStopPrev) : longStop
// if close[1] > longStopPrev
// longstop = max(longStop, longStopPrev)
// else
// longstop = longstop
shortStop = (useClose ? lowest(close, length) : lowest(length)) + atr
shortStopPrev = nz(shortStop[1], shortStop)
shortStop := close[1] < shortStopPrev ? min(shortStop, shortStopPrev) : shortStop
// Direction persists until price crosses the opposite PREVIOUS stop.
var int dir = 1
dir := close > shortStopPrev ? 1 : close < longStopPrev ? -1 : dir
// if close > shortStopPrev
// dir = 1
// elseif close < longStopPrev
// dir = -1
//else
// dir = dir[-1]
var color longColor = color.green
var color shortColor = color.red
// A buy signal is the bar where the direction flips from -1 to +1.
buySignal = dir == 1 and dir[1] == -1
// and = 1 & 1 > 1
plotshape(buySignal and showLabels ? longStop : na, title="Buy Label", text="Buy", location=location.absolute, style=shape.labelup, size=size.tiny, color=longColor, textcolor=color.white, transp=0)
sellSignal = dir == -1 and dir[1] == 1
plotshape(sellSignal and showLabels ? shortStop : na, title="Sell Label", text="Sell", location=location.absolute, style=shape.labeldown, size=size.tiny, color=shortColor, textcolor=color.white, transp=0)
I have an issue with my code. I need the script to remove fields which fill all three conditions:
the CreatedBy is koala,
Book is PI or SI or II or OT or FG,
and the Category is Cert or CertPlus or Cap or Downside.
Currently my code removes all koala and all books and only takes the last argument. So for example my current output leaves fields only if the category is different. I would like it to show fields ONLY if all 3 arguments are met and not if koala or book = PI or SI or II or OT or FG and to show everything else which is in range.
If field is created by koala and category is Cert I wish to see this field but now it is removed.
Or if none of the arguments are met I also want to see those fields ( e.g. createdby is Extra, Book is NG and Category is Multiple. Now those are also removed from the output.
Example dataset:
In the link below - I wish to remove only those marked red:
# Directory containing this script (realpath resolves any symlinks).
current_path = os.path.dirname(os.path.realpath(sys.argv[0]))
# Sentinel 0 means "path not found yet" for both files.
a_path, q_path = 0, 0
def assign_path(current_path, a_path=0, q_path=0):
    """Locate the activity and query files inside *current_path*.

    Scans the directory listing (case-insensitively) for a name containing
    'activity' and one containing 'query'.  Matches are returned as
    backslash-joined paths; any name that is not found keeps the value the
    caller passed in (0 by default, meaning "not found").
    """
    for entry in os.listdir(current_path):
        if re.search('(?i)activity', entry):
            a_path = '\\'.join([current_path, entry])
        elif re.search('(?i)query', entry):
            q_path = '\\'.join([current_path, entry])
    return a_path, q_path
a_path, q_path = assign_path(current_path)
# Fall back to an "input" subdirectory when either file was not found
# directly next to the script.
if a_path == 0 or q_path == 0:
    directories = []
    for entry in os.listdir(current_path):
        # BUG FIX: os.path.isdir(entry) tested the bare name against the
        # current working directory, not against current_path, so real
        # subdirectories were missed whenever the CWD differed.  Join first.
        if os.path.isdir(os.path.join(current_path, entry)):
            directories.append(entry)
    for entry in directories:
        if re.search('(?i)input', entry):
            a_path, q_path = assign_path('\\'.join([current_path, entry]), a_path, q_path)
# Row-index lists for the three frames; L is pruned later to drop excluded rows.
# NOTE(review): in this snippet these lines appear BEFORE qr/qr2/ac are
# loaded -- in the real script they must run after the read_excel calls.
L = list(range(len(qr)))
L1 = list(range(len(qr2)))
L2 = list(range(len(ac)))
-------------------------------------------------------
qr = pd.read_excel(q_path)
qr2 = pd.read_excel(q_path)
# Seed the record frame with the first two rows, and build an all-NaN
# template dict with the same columns.
qr_rec = qr2.iloc[[0,1]]
d = qr2.iloc[0].to_dict()
for i in list(d.keys()): d[i] = np.nan
for i in range(len(qr2)):
    row = qr2.iloc[i]
    # A row is removed only when ALL THREE conditions hold: koala creator,
    # listed book, AND listed category.
    # BUG FIX: the category test was `not in`, which inverted the intent --
    # it removed every koala/listed-book row whose category was anything
    # EXCEPT Cert/CertPlus/Cap/Downside.
    if row['LinkageLinkType'] != 'B2B_COUNTER_TRADE'\
    and row['CreatedBy'] == 'koala_'\
    and row['Book'] in {'PI','SI','II','OT','FG'}\
    and row['Category'] in {'Cert','CertPlus','Cap','Downside'}:
        while i in L: L.remove(i)
    # Collect rows whose risk id is unknown to the activity file.
    # NOTE(review): DataFrame.append is removed in pandas >= 2.0; use
    # pd.concat there.
    if row['PrimaryRiskId'] not in list(aID):
        qr_rec = qr_rec.append(row, ignore_index=True)
I have added the beginning of the code, which allows me to use the Excel file. I have two files, one of them being a_path (please disregard this one). The issue I have is with the q_path file.
Check this out:
# Load the example dataset shown below (illustration only; result not assigned).
pd.read_csv('stackoverflow.csv')
category book createdby
0 Multiple NG panda
1 Cert DG koala
2 Cap PI monkey
3 CertPlus ZZ panda
4 Cap ll joey
5 Cert OT koala
6 Cap FG koala
7 Cert PI koala
8 Block SI koala
9 Cap II koala
# Keep every row EXCEPT those matching all three conditions at once; the
# leading ~ negates the parenthesised expression inside DataFrame.query.
df.query("~(category in ['Cert', 'Cap'] and book in ['OT', 'FG', 'PI', 'II'] and createdby=='koala')")
category book createdby
0 Multiple NG panda
1 Cert DG koala
2 Cap PI monkey
3 CertPlus ZZ panda
4 Cap ll joey
8 Block SI koala
pd.DataFrame.query can be used to filter data, the ~ at the beginning is a not operator.
BR
E
I'm struggling with Tesseract OCR.
I have a blood examination image; it has a table with indentation. Although Tesseract recognizes the characters very well, the table structure isn't preserved in the final output. For example, look at the lines below "Emocromo con formula" (Eng. translation: blood count with formula) that are indented. I want to preserve that indentation.
I read the other related discussions and I found the option preserve_interword_spaces=1. The result became slightly better but as you can see, it isn't perfect.
Any suggestions?
Update:
I tried Tesseract v5.0 and the result is the same.
Code:
Tesseract version is 4.0.0.20190314
from PIL import Image
import pytesseract

# preserve_interword_spaces=1 keeps the column gaps; --oem 1 selects the
# LSTM engine; --psm 1 is automatic page segmentation with orientation and
# script detection; both English and Italian models are loaded.
custom_config = r'-c preserve_interword_spaces=1 --oem 1 --psm 1 -l eng+ita'

source_image = Image.open('referto-1.jpg')
extracted_text = pytesseract.image_to_string(source_image, config=custom_config)
print(extracted_text)

# Persist the OCR result to a text file next to the script.
with open("referto.txt", "w") as text_file:
    text_file.write(extracted_text)
Result with comparison:
GITHUB:
I have created a GitHub repository if you want to try it yourself.
Thanks for your help and your time
The image_to_data() function provides much more information. For each word it will return its bounding rectangle. You can use that.
Tesseract segments the image automatically into blocks. You can then sort the blocks by their vertical position, and for each block you can find the mean character width (which depends on the block's recognized font). Then, for each word in the block, check whether it is close to the previous one; if not, add spaces accordingly. I'm using pandas to ease the calculations, but its usage is not necessary. Don't forget that the result should be displayed using a monospaced font.
import pytesseract
from pytesseract import Output
from PIL import Image
import pandas as pd

# Word-level OCR data: one entry per detected token, with block/paragraph/
# line numbers and pixel bounding boxes (left, top, width, height).
custom_config = r'-c preserve_interword_spaces=1 --oem 1 --psm 1 -l eng+ita'
d = pytesseract.image_to_data(Image.open(r'referto-2.jpg'), config=custom_config, output_type=Output.DICT)
df = pd.DataFrame(d)
# clean up blanks
# NOTE(review): conf is compared as the string '-1' -- some pytesseract
# versions return numeric conf values; verify against the installed version.
df1 = df[(df.conf!='-1')&(df.text!=' ')&(df.text!='')]
# sort blocks vertically
sorted_blocks = df1.groupby('block_num').first().sort_values('top').index.tolist()
for block in sorted_blocks:
    curr = df1[df1['block_num']==block]
    # Estimate mean character width from words longer than 3 chars so that
    # pixel offsets can be converted into monospace column positions.
    sel = curr[curr.text.str.len()>3]
    char_w = (sel.width/sel.text.str.len()).mean()
    # prev_left tracks the column (in characters) where the previous word
    # ended on the current line.
    prev_par, prev_line, prev_left = 0, 0, 0
    text = ''
    for ix, ln in curr.iterrows():
        # add new line when necessary
        if prev_par != ln['par_num']:
            text += '\n'
            prev_par = ln['par_num']
            prev_line = ln['line_num']
            prev_left = 0
        elif prev_line != ln['line_num']:
            text += '\n'
            prev_line = ln['line_num']
            prev_left = 0
        added = 0 # num of spaces that should be added
        # Pad with spaces when the word starts noticeably to the right of
        # where the previous word ended, preserving the table indentation.
        if ln['left']/char_w > prev_left + 1:
            added = int((ln['left'])/char_w) - prev_left
            text += ' ' * added
        text += ln['text'] + ' '
        prev_left += len(ln['text']) + added + 1
    text += '\n'
    print(text)
This code will produce following output:
ssseeess+ SERVIZIO SANITARIO REGIONALE Pagina 2 di3
seoeeeees EMILIA-RROMAGNA
©2888 800
©9868 6 006 : pe ‘ ‘ "
«ee ##e#ecee Azienda Unita Sanitaria Locale di Modena
Seat se ces Amends Ospedaliero-Universitaria Policlinico di Modena
Dipartimento interaziendale ad attivita integrata di Medicina di Laboratorio e Anatomia Patologica
Direttore dr. T.Trenti
Ospedale Civile S.Agostino-Estense
S.C. Medicina di Laboratorio
S.S. Patologia Clinica - Corelab
Sistema di Gestione per la Qualita certificato UNI EN ISO 9001:2015
Responsabile dr.ssa M.Varani
Richiesta (CDA): 49/073914 Data di accettazione: 18/12/2018
Data di check-in: 18/12/2018 10:27:06
Referto del 18/12/2018 16:39:53
Provenienza: D4-cp sassuolo
Sig.
Data di Nascita:
Domicilio:
ANALISI RISULTATO __UNITA'DI MISURA VALORI DI RIFERIMENTO
Glucosio 95 mg/dl (70 - 110 )
Creatinina 1.03 mg/dl ( 0.50 - 1.40 )
eGFR Filtrato glomerulare stimato >60 ml/min Cut-off per rischio di I.R.
7 <60. Il calcolo é€ riferito
Equazione CKD-EPI ad una superfice corporea
Standard (1,73 mq)x In Caso
di etnia afroamericana
moltiplicare per il fattore
1,159.
Colesterolo 212 * mg/dl < 200 v.desiderabile
Trigliceridi 106 mg/dl < 180 v.desiderabile
Bilirubina totale 0.60 mg/dl ( 0.16 - 1.10 )
Bilirubina diretta 0.10 mg/dl ( 0.01 - 0.3 )
GOT - AST 17 U/L (1-37)
GPT - ALT ay U/L (1- 40 )
Gamma-GT 15 U/L (1-55)
Sodio 142 mEq/L ( 136 - 146 )
Potassio 4.3 mEq/L (3.5 - 5.3)
Vitamina B12 342 pg/ml ( 200 - 960 )
TSH 5.47 * ulU/ml (0.35 - 4.94 )
FT4 9.7 pg/ml (7 = 15)
Urine chimico fisico morfologico
u-Colore giallo paglierino
u-Peso specifico 1.012 ( 1.010 - 1.027 )
u-pH 5.5 (5.5 - 6.5)
u-Glucosio assente mg/dl assente
u-Proteine assente mg/dl (0 -10 )
u-Emoglobina assente mg/dl assente
u-Corpi chetonici assente mg/dl assente
u-Bilirubina assente mg/dl assente
u-Urobilinogeno 0.20 mg/dl (0- 1.0 )
sedimento non significativo
Il Laureato:
Dott. CRISTINA ROTA
Per ogni informazione o chiarimento sugli aspetti medici, puo rivolgersi al suo medico curante
Referto firmato elettronicamente secondo le norme vigenti: Legge 15 marzo 1997, n. 59; D.P.R. 10 novembre 1997, n.513;
D.P.C.M. 8 febbraio 1999; D.P.R 28 dicembre 2000, n.445; D.L. 23 gennaio 2002, n.10.
Certificato rilasciato da: Infocamere S.C.p.A. (http://www.card.infocamere. it)
i! Laureato: Dr. CRISTINA ROTA
1! documento informatico originale 6 conservato presso Parer - Polo Archivistico della Regione Emilia-Romagna
I want to set a limit for my feature_importances_ output using DataFrame.
Below is my code (refer from this blog):
# 90/10 train/test split with a fixed seed for reproducibility.
train = df_visualization.sample(frac=0.9, random_state=639)
test = df_visualization.drop(train.index)

# Persist both splits for later inspection.
train.to_csv('train.csv', encoding='utf-8')
test.to_csv('test.csv', encoding='utf-8')

# The first 65 columns are the inputs; column index 65 holds the label.
train_dis = train.iloc[:, :66]
train_val = train_dis.values
train_in, train_out = train_val[:, :65], train_val[:, 65]

test_dis = test.iloc[:, :66]
test_val = test_dis.values
test_in, test_out = test_val[:, :65], test_val[:, 65]

# Fit an entropy-based decision tree and score it on the training data.
dt = tree.DecisionTreeClassifier(random_state=59, criterion='entropy')
dt = dt.fit(train_in, train_out)
score = dt.score(train_in, train_out)
test_predicted = dt.predict(test_in)

# Print the feature ranking, most important first.
print("Feature ranking:")
print(DataFrame(dt.feature_importances_, columns=["Imp"],
                index=train.iloc[:, :65].columns).sort_values(['Imp'], ascending=False))
My problem now is it display all 65 features.
Output :
Imp
wbc 0.227780
age 0.100949
gcs 0.069359
hr 0.069270
rbs 0.053418
sbp 0.052067
Intubation-No 0.050729
... ...
Babinski-Normal 0.000000
ABG-Metabolic Alkolosis 0.000000
ABG-Respiratory Acidosis 0.000000
Reflexes-Unilateral Hyperreflexia 0.000000
NS-No 0.000000
For example I just want top 5 features only.
Expected output:
Imp
wbc 0.227780
age 0.100949
gcs 0.069359
hr 0.069270
rbs 0.053418
Update :
I got the way to display using itertuples.
# Rank all features by importance, most important first.
display = pd.DataFrame(dt.feature_importances_, columns=["Imp"],
                       index=train.iloc[:, :65].columns).sort_values(['Imp'], ascending=False)
# BUG FIX: the original ended with `x++`, which is not valid Python (and the
# manual counter/break dance is unnecessary).  head(5) takes the top rows
# directly; itertuples() then yields (index_label, Imp) pairs.
for feature, importance in display.head(5).itertuples():
    print(feature, "=", importance)
Output :
Feature ranking:
wbc = 0.227780409582
age = 0.100949241154
gcs = 0.0693593476192
hr = 0.069270425399
rbs = 0.0534175402602
But I want to know whether this is the efficient way to get the output?
Try this:
indices = np.argsort(dt.feature_importances_)[::-1]
for i in range(5):
print " %s = %s" % (feature_cols[indices[i]], dt.feature_importances_[indices[i]])
I am trying to calculate a satellites Range Rate using Python and pyephem. Unfortunately pyephems result seems to be wrong.
After comparing the value with calculations made by other satellite tracking programs like GPredict or Ham Radio Deluxe, the difference goes up to 2 km/sec. The calculated values for the azimuth and elevation angle are almost the same, though. The TLEs are new and the system clock is the same.
Do you see any mistake I made in my code or do you have an idea what else could cause the error?
Thank you very much!
Here is my Code:
import ephem
import time

# TLE Kepler elements
line1 = "ESTCUBE 1"
line2 = "1 39161U 13021C 13255.21187718 .00000558 00000-0 10331-3 0 3586"
line3 = "2 39161 98.1264 332.9982 0009258 190.0328 170.0700 14.69100578 18774"
satellite = ephem.readtle(line1, line2, line3) # create ephem object from tle information
while True:
    city = ephem.Observer() # recreate Observer with current time
    # BUG FIX: longitude and latitude were swapped.  Berlin is at
    # lat 52.5186 N, lon 13.4080 E, but the original assigned lon='52.5186'
    # and lat='13.4080', putting the observer in the wrong place and
    # corrupting the range-rate comparison against GPredict.
    city.lon, city.lat, city.elevation = '13.4080', '52.5186', 100
    satellite.compute(city)
    RangeRate = satellite.range_velocity/1000 # range_velocity is m/s -> km/s
    print ("RangeRate: " + str(RangeRate))
    time.sleep(1)
I recorded some Range Rate values from the script and from GPRedict to make the error reproducibly:
ESTCUBE 1
1 39161U 13021C 13255.96108453 .00000546 00000-0 10138-3 0 3602
2 39161 98.1264 333.7428 0009246 187.4393 172.6674 14.69101320 18883
date: 2013-09-13
time pyephem-Script Gpredict
14:07:02 -1.636 -3.204
14:12:59 -2.154 -4.355
14:15:15 -2.277 -4.747
14:18:48 -2.368 -5.291
And I added some lines to calculate the satellites elevation and coordinates:
elevation = satellite.elevation
sat_latitude = satellite.sublat
sat_longitude = satellite.sublong
The results with time stamp are:
2013-09-13 14:58:13
RangeRate: 2.15717797852 km/s
Range: 9199834.0
Sat Elevation: 660743.6875
Sat_Latitude: -2:22:27.3
Sat_Longitude: -33:15:15.4
2013-09-13 14:58:14
RangeRate: 2.15695092773 km/s
Range: 9202106.0
Sat Elevation: 660750.9375
Sat_Latitude: -2:26:05.8
Sat_Longitude: -33:16:01.7
Another important information might be that I am trying to calculate the Doppler Frequency for a satellite pass. And therefore I need the Range Rate:
f_Doppler_corrected = (c0/(c0 + RangeRate))*f0
Range Rate describes the velocity of a moving object on the visual axis to the observer. Maybe the range_velocity is something different?
It seems pyephem (libastro as a backend) and gpredict (predict) as a backend use different ways to calculate the satellite velocity. I am attaching detailed output of comparison for an actual reference observation. It can be seen that both output the correct position, while only gpredict outputs reasonable range_rate values. The error seems to occur in the satellite velocity vector. I would say that the reasons from gpredict are more reasonable (and the similar code is with question marks in libastro ..) therefore I will propose a fix in libastro to handle it as in gpredict, however maybe someone who understands the math behind it can add to this.
I added another tool, PyPredict (also predict based), to get some calculations here. However these values are off, so must be something else.
Pyephem: 3.7.5.3
Gpredict: 1.3
PyPredict 1.1 (Git: 10/02/2015)
OS: Ubuntu x64
Python 2.7.6
Time:
Epoch timestamp: 1420086600
Timestamp in milliseconds: 1420086600000
Human time (GMT): Thu, 01 Jan 2015 04:30:00 GMT
ISS (ZARYA)
1 25544U 98067A 15096.52834639 .00016216 00000-0 24016-3 0 9993
2 25544 51.6469 82.0200 0006014 185.1879 274.8446 15.55408008936880
observation point: N0 E0 alt=0
Test 1:
Gpredict: (Time, Az, El, Slant Range, Range Velocity)
2015 01 01 04:30:00 202.31 -21.46 5638 -5.646
2015 01 01 04:40:00 157.31 -2.35 2618 -3.107
2015 01 01 04:50:00 72.68 -10.26 3731 5.262
Pyephem 3.7.5.3 (default atmospheric refraction)
(2015/1/1 04:30:00, 202:18:45.3, -21:27:43.0, 5638.0685, -5.3014228515625)
(2015/1/1 04:40:00, 157:19:08.3, -1:21:28.6, 2617.9915, -2.934402099609375)
(2015/1/1 04:50:00, 72:40:59.9, -10:15:15.1, 3730.78375, 4.92381201171875)
No atmospheric refraction
(2015/1/1 04:30:00, 202:18:45.3, -21:27:43.0, 5638.0685, -5.3014228515625)
(2015/1/1 04:40:00, 157:19:08.3, -1:21:28.6, 2617.9915, -2.934402099609375)
(2015/1/1 04:50:00, 72:40:59.9, -10:15:15.1, 3730.78375, 4.92381201171875)
Pypredict
1420086600.0
{'decayed': 0, 'elevation': -19.608647085869123, 'name': 'ISS (ZARYA)', 'norad_id': 25544, 'altitude': 426.45804846615556, 'orbit': 92208, 'longitude': 335.2203454719759, 'sunlit': 1, 'geostationary': 0, 'footprint': 4540.173580837984, 'epoch': 1420086600.0, 'doppler': 1635.3621339278857, 'visibility': 'D', 'azimuth': 194.02436209048014, 'latitude': -45.784314563471646, 'orbital_model': 'SGP4', 'orbital_phase': 73.46488929141783, 'eclipse_depth': -8.890253049060693, 'slant_range': 5311.3721164183535, 'has_aos': 1, 'orbital_velocity': 27556.552465256085}
1420087200.0
{'decayed': 0, 'elevation': -6.757496200551716, 'name': 'ISS (ZARYA)', 'norad_id': 25544, 'altitude': 419.11153234752874, 'orbit': 92208, 'longitude': 9.137628905963876, 'sunlit': 1, 'geostationary': 0, 'footprint': 4502.939901708917, 'epoch': 1420087200.0, 'doppler': 270.6901377419433, 'visibility': 'D', 'azimuth': 139.21315598291235, 'latitude': -20.925997669236732, 'orbital_model': 'SGP4', 'orbital_phase': 101.06301876416072, 'eclipse_depth': -18.410968838249545, 'slant_range': 3209.8444916123644, 'has_aos': 1, 'orbital_velocity': 27568.150821416708}
1420087800.0
{'decayed': 0, 'elevation': -16.546383900323555, 'name': 'ISS (ZARYA)', 'norad_id': 25544, 'altitude': 414.1342802649042, 'orbit': 92208, 'longitude': 31.52356804788407, 'sunlit': 1, 'geostationary': 0, 'footprint': 4477.499436144489, 'epoch': 1420087800.0000002, 'doppler': -1597.032808834609, 'visibility': 'D', 'azimuth': 76.1840387294104, 'latitude': 9.316828913183791, 'orbital_model': 'SGP4', 'orbital_phase': 128.66115193399546, 'eclipse_depth': -28.67721196244149, 'slant_range': 4773.838774518728, 'has_aos': 1, 'orbital_velocity': 27583.591664378775}
Test 2 (short time):
Gpredict: (Slant Range, Range Velocity)
2015 01 01 04:30:00 5638 -5.646
2015 01 01 04:30:10 5581 -5.648
->5.7 km/s avg
(2015/1/1 04:30:00, 5638.0685, -5.3014228515625)
(2015/1/1 04:30:10, 5581.596, -5.30395361328125)
->5.7 km/s avg
Pyephem
import ephem
import time

# TLE Kepler elements
line1 = "ISS (ZARYA)"
line2 = "1 25544U 98067A 15096.52834639 .00016216 00000-0 24016-3 0 9993"
line3 = "2 25544 51.6469 82.0200 0006014 185.1879 274.8446 15.55408008936880"
satellite = ephem.readtle(line1, line2, line3) # create ephem object from tle information

# Reference observer at N0 E0, sea level.
obs = ephem.Observer()
obs.lon, obs.lat, obs.elevation = '0', '0', 0

# The three sample epochs compared against GPredict / PyPredict.
SAMPLE_DATES = ('2015/1/1 04:30:00', '2015/1/1 04:40:00', '2015/1/1 04:50:00')

def report(date_str):
    """Compute the pass geometry at date_str and print az/alt/range/range-rate.

    The original script copy-pasted this compute/print stanza three times per
    section; factoring it out removes the duplication without changing output.
    """
    obs.date = date_str
    satellite.compute(obs)
    print(obs.date, satellite.az, satellite.alt, satellite.range/1000, satellite.range_velocity/1000)

print('Pyephem Default (atmospheric refraction)')
for d in SAMPLE_DATES:
    report(d)

obs.pressure = 0  # disable atmospheric refraction
print('Pyephem No atmospheric refraction')
for d in SAMPLE_DATES:
    report(d)

# Short-interval pair used to sanity-check the range rate by finite
# differences (delta-range over 10 s).
print('10 s timing')
for d in ('2015/1/1 04:30:00', '2015/1/1 04:30:10'):
    obs.date = d
    satellite.compute(obs)
    print(obs.date, satellite.range/1000, satellite.range_velocity/1000)
Pypredict
import predict
import datetime
import time
format = '%Y/%m/%d %H:%M:%S'
tle = """ISS (ZARYA)
1 25544U 98067A 15096.52834639 .00016216 00000-0 24016-3 0 9993
2 25544 51.6469 82.0200 0006014 185.1879 274.8446 15.55408008936880"""
qth = (0, 10, 0) # lat (N), long (W), alt (meters)
#expect time as epoch time float
time= (datetime.datetime.strptime('2015/1/1 04:30:00', format) -datetime.datetime(1970,1,1)).total_seconds()
result = predict.observe(tle, qth, time)
print time
print result
time= (datetime.datetime.strptime('2015/1/1 04:40:00', format) -datetime.datetime(1970,1,1)).total_seconds()
result = predict.observe(tle, qth, time)
print time
print result
time= (datetime.datetime.strptime('2015/1/1 04:50:00', format) -datetime.datetime(1970,1,1)).total_seconds()
result = predict.observe(tle, qth, time)
print time
print result
Debug output of Gpredict and PyEphem
PyPredict
Name = ISS (ZARYA)
current jd = 2457023.68750
current mjd = 42003.7
satellite jd = 2457119.02835
satellite mjd = 42099
SiteLat = 0
SiteLong = 6.28319
SiteAltitude = 0
se_EPOCH : 115096.52834638999775052071
se_XNO : 0.06786747737871574870
se_XINCL : 0.90140843391418457031
se_XNODEO : 1.43151903152465820312
se_EO : 0.00060139998095110059
se_OMEGAO : 3.23213863372802734375
se_XMO : 4.79694318771362304688
se_BSTAR : 0.00024016000679694116
se_XNDT20 : 0.00000000049135865048
se_orbit : 93688
dt : -137290.81880159676074981689
CrntTime = 42004.2
SatX = -3807.5
SatY = 2844.85
SatZ = -4854.26
Radius = 6793.68
SatVX = -5.72752
SatVY = -3.69533
SatVZ = 2.32194
SiteX = -6239.11
SiteY = 1324.55
SiteZ = 0
SiteVX = -0.0965879
SiteVY = -0.454963
Height = 426.426
SSPLat = -0.795946
SSPLong = 0.432494
Azimuth = 3.53102
Elevation = -0.374582
Range = 5638.07
RangeRate = -5.30142
(2015/1/1 04:30:00, 5638.0685, -5.3014228515625)
Gpredict
time: 2457023,687500
pos obs: -6239,093574, 1324,506494, 0,000000
pos sat: -3807,793748, 2844,641722, -4854,112635
vel obs: -0,096585, -0,454962, 0,000000
vel sat: -6,088242, -3,928388, 2,468585
Gpredict (sgp_math.h)
/*------------------------------------------------------------------*/
/* Converts the satellite's position and velocity */
/* vectors from normalised values to km and km/sec */
/* Position is scaled by xkmper (Earth radius in km); velocity gets */
/* the extra xmnpda/secday factor -- presumably minutes-per-day over */
/* seconds-per-day, converting per-minute motion to km/s (confirm). */
void
Convert_Sat_State( vector_t *pos, vector_t *vel )
{
Scale_Vector( xkmper, pos );
Scale_Vector( xkmper*xmnpda/secday, vel );
} /* Procedure Convert_Sat_State */
Ephem (Libastro)
*SatX = ERAD*posvec.x/1000; /* earth radii to km */
*SatY = ERAD*posvec.y/1000;
*SatZ = ERAD*posvec.z/1000;
/* NOTE(review): the magic factor 100 below (already flagged "??" upstream)
   does not match Gpredict's xkmper*xmnpda/secday velocity scaling -- it
   looks like the source of the divergent range-rate values; confirm. */
*SatVX = 100*velvec.x; /* ?? */
*SatVY = 100*velvec.y;
*SatVZ = 100*velvec.z;
Updating to the most recent release of pyephem (I tried V3.7.6.0) seems to solve the problem. The range rate now agrees closely with the values given by other commonly used tracking software.