@@ -157,7 +157,7 @@ def update_TLE(old_tle,y0):
    tle_elements['element_number'] = old_tle.element_number
    return TLE(tle_elements)

-def newton_method(tle_0, time_mjd, target_state=None, new_tol=1e-12,max_iter=50):
+def newton_method(tle_0, time_mjd, target_state=None, new_tol=1e-12,max_iter=50, verbose=False):
"""
162
162
This method performs Newton method starting from an initial TLE and a given propagation time. The objective
163
163
is to find a TLE that accurately reconstructs the propagated state, at observation time.
@@ -220,15 +220,17 @@ def newton_method(tle_0, time_mjd, target_state=None, new_tol=1e-12,max_iter=50)
            dY[0] = -float(y0[3])*0.9999
        dY = torch.tensor([0.,0.,0.]+list(dY)+[0.], requires_grad=True)
        if tol < new_tol:
-            print(f"F(y): {np.linalg.norm(F)}")
-            print(f"Solution found, at iter: {i}")
+            if verbose:
+                print(f"F(y): {np.linalg.norm(F)}")
+                print(f"Solution found, at iter: {i}")
            return next_tle, y0 #+dY
        else:
            #Newton update:
            #y0=y0+dY
            y0 = torch.tensor([float(el1)+float(el2) for el1, el2 in zip(list(y0), list(dY))], requires_grad=True)
            next_tle = update_TLE(next_tle, y0)
        i += 1
-    print("Solution not found, returning best found so far")
-    print(f"F(y): {np.linalg.norm(F)}")
+    if verbose:
+        print("Solution not found, returning best found so far")
+        print(f"F(y): {np.linalg.norm(F)}")
    return next_tle, y0
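
For reference, a minimal usage sketch of the updated call is given below. Only the `newton_method` signature, its `verbose=False` default, and the `(next_tle, y0)` return value come from this diff; the `load_initial_tle` helper, the example epoch, and everything else in the sketch are illustrative assumptions and would need to be adapted to the actual project.

```python
# Hedged usage sketch: only newton_method's signature and its (next_tle, y0)
# return value are taken from the diff above; the helper and the epoch below
# are placeholders, not part of the project.

tle_0 = load_initial_tle()   # hypothetical helper returning an initial TLE object
time_mjd = 60000.0           # illustrative observation epoch in Modified Julian Date

# verbose=False (the default) keeps the solver silent; verbose=True restores
# the previous diagnostics (residual norm, iteration count, and the
# "solution not found" notice when max_iter is exhausted).
next_tle, y0 = newton_method(tle_0, time_mjd, new_tol=1e-12, max_iter=50, verbose=True)
```

Gating the prints behind a flag like this is a common choice when the solver is invoked in a loop over many objects, where unconditional printing would flood the output.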