From 76dc3975ac8a5cd43f0dfc628bb35f9f53d6238f Mon Sep 17 00:00:00 2001
From: Parick Smith
Date: Thu, 10 Oct 2024 14:15:15 -0400
Subject: [PATCH 1/7] pep8 compliance in dbConnection_old.py

---
 pgamit/dbConnection_old.py | 96 +++++++++++++++++++++++---------------
 1 file changed, 58 insertions(+), 38 deletions(-)

diff --git a/pgamit/dbConnection_old.py b/pgamit/dbConnection_old.py
index b9828522..dd5e5b18 100644
--- a/pgamit/dbConnection_old.py
+++ b/pgamit/dbConnection_old.py
@@ -3,7 +3,8 @@
 Date: 02/16/2017
 Author: Demian D. Gomez
 
-This class is used to connect to the database and handles inserts, updates and selects
+This class is used to connect to the database
+and handles inserts, updates and selects.
 It also handles the error, info and warning messages
 """
 
@@ -11,7 +12,6 @@
 import configparser
 import inspect
 import re
-from datetime import datetime
 from decimal import Decimal
 
 # deps
@@ -36,19 +36,24 @@ def debug(s):
         file_append('/tmp/db.log', "DB: %s\n" % s)
 
 
-class dbErrInsert (Exception): pass
+class dbErrInsert (Exception):
+    pass
 
 
-class dbErrUpdate (Exception): pass
+class dbErrUpdate(Exception):
+    pass
 
 
-class dbErrConnect(Exception): pass
+class dbErrConnect(Exception):
+    pass
 
 
-class dbErrDelete (Exception): pass
+class dbErrDelete (Exception):
+    pass
 
 
-class IntegrityError(pg.IntegrityError): pass
+class IntegrityError(pg.IntegrityError):
+    pass
 
 
 class Cnn(pg.DB):
@@ -64,8 +69,8 @@ def __init__(self, configfile, use_float=False, write_cfg_file=False):
                    'database': DB_NAME}
 
         self.active_transaction = False
-        self.options            = options
-
+        self.options = options
+
         # parse session config file
         config = configparser.ConfigParser()
 
@@ -74,8 +79,10 @@ def __init__(self, configfile, use_float=False, write_cfg_file=False):
         except FileNotFoundError:
             if write_cfg_file:
                 create_empty_cfg()
-                print(' >> No gnss_data.cfg file found, an empty one has been created. Replace all the necessary '
-                      'config and try again.')
+                print(
+                    ' >> No gnss_data.cfg file found, '
+                    'an empty one has been created. '
+ 'Replace all the necessary config and try again.') exit(1) else: raise @@ -87,10 +94,10 @@ def __init__(self, configfile, use_float=False, write_cfg_file=False): for i in range(3): try: pg.DB.__init__(self, - host = options['hostname'], - user = options['username'], - passwd = options['password'], - dbname = options['database']) + host=options['hostname'], + user=options['username'], + passwd=options['password'], + dbname=options['database']) # set casting of numeric to floats pg.set_typecast('Numeric', float) pg.set_decimal(float if use_float else @@ -98,7 +105,7 @@ def __init__(self, configfile, use_float=False, write_cfg_file=False): except pg.InternalError as e: err = e if 'Operation timed out' in str(e) or \ - 'Connection refused' in str(e): + 'Connection refused' in str(e): continue else: raise e @@ -108,10 +115,10 @@ def __init__(self, configfile, use_float=False, write_cfg_file=False): raise dbErrConnect(err) # open a conenction to a cursor - self.cursor_conn = pgdb.connect(host = options['hostname'], - user = options['username'], - password = options['password'], - database = options['database']) + self.cursor_conn = pgdb.connect(host=options['hostname'], + user=options['username'], + password=options['password'], + database=options['database']) self.cursor = self.cursor_conn.cursor() @@ -119,7 +126,7 @@ def query(self, command, *args): err = None for i in range(3): try: - #print('log-db-query', command, args) + # print('log-db-query', command, args) rs = pg.DB.query(self, command, *args) except ValueError as e: # connection lost, attempt to reconnect @@ -128,7 +135,9 @@ def query(self, command, *args): else: break else: - raise Exception('dbConnection.query failed after 3 retries. Last error was: ' + str(err)) + raise Exception( + ('dbConnection.query failed after 3 retries. ' + 'Last error was: ') + str(err)) debug(" QUERY: command=%r args=%r" % (command, args)) # debug(" ->RES: %s" % repr(rs)) @@ -151,7 +160,10 @@ def query_float(self, command, as_dict=False): else: break else: - raise Exception('dbConnection.query_float failed after 3 retries. Last error was: ' + str(err)) + raise Exception( + ('dbConnection.query_float failed after 3 retries. ' + 'Last error was: ') + + str(err)) recordset = rs.dictresult() if as_dict else rs.getresult() @@ -163,11 +175,13 @@ def query_float(self, command, as_dict=False): return recordset def get_columns(self, table): - tblinfo = self.query('select column_name, data_type from information_schema.columns where table_name=\'%s\'' - % table).dictresult() + tblinfo = self.query( + ('select column_name, data_type from ' + 'information_schema.columns where table_name=\'%s\'') + % table).dictresult() - return { field['column_name'] : field['data_type'] - for field in tblinfo } + return {field['column_name']: field['data_type'] + for field in tblinfo} def begin_transac(self): # do not begin a new transaction with another one active. @@ -186,12 +200,12 @@ def rollback_transac(self): self.rollback() def insert(self, table, row=None, **kw): - debug("INSERT: table=%r row=%r kw=%r" % (table,row,kw)) + debug("INSERT: table=%r row=%r kw=%r" % (table, row, kw)) err = None for i in range(3): try: - #print('log-db-insert', table, row, kw) + # print('log-db-insert', table, row, kw) pg.DB.insert(self, table, row, **kw) except ValueError as e: # connection lost, attempt to reconnect @@ -202,7 +216,9 @@ def insert(self, table, row=None, **kw): else: break else: - raise dbErrInsert('dbConnection.insert failed after 3 retries. 
Last error was: ' + str(err))
+            raise dbErrInsert(
+                'dbConnection.insert failed after 3 retries. Last error was: '
+                + str(err))
 
     def executemany(self, sql, parameters):
         debug("EXECUTEMANY: sql=%r parameters=%r" % (sql, parameters))
@@ -221,7 +237,7 @@ def update(self, table, row=None, **kw):
         err = None
         for i in range(3):
             try:
-                #print('log-db-update', table, row, kw)
+                # print('log-db-update', table, row, kw)
                 pg.DB.update(self, table, row, **kw)
             except ValueError as e:
                 # connection lost, attempt to reconnect
@@ -232,7 +248,9 @@ def update(self, table, row=None, **kw):
             else:
                 break
         else:
-            raise dbErrUpdate('dbConnection.update failed after 3 retries. Last error was: ' + str(err))
+            raise dbErrUpdate(
+                'dbConnection.update failed after 3 retries. Last error was: '
+                + str(err))
 
     def delete(self, table, row=None, **kw):
         debug("DELETE: table=%r row=%r kw=%r" % (table, row, kw))
@@ -240,7 +258,7 @@ def delete(self, table, row=None, **kw):
         err = None
         for i in range(3):
             try:
-                #print('log-db-delete', table, row, kw)
+                # print('log-db-delete', table, row, kw)
                 pg.DB.delete(self, table, row, **kw)
             except ValueError as e:
                 # connection lost, attempt to reconnect
@@ -251,7 +269,9 @@ def delete(self, table, row=None, **kw):
             else:
                 break
         else:
-            raise dbErrDelete('dbConnection.delete failed after 3 retries. Last error was: ' + str(err))
+            raise dbErrDelete(
+                'dbConnection.delete failed after 3 retries. Last error was: '
+                + str(err))
 
     def insert_event(self, event):
         debug("EVENT: event=%r" % (event.db_dict()))
@@ -269,7 +289,8 @@ def insert_event_bak(self, type, module, desc):
         desc = re.sub(r'BASH.*', '', desc)
         desc = re.sub(r'PSQL.*', '', desc)
 
-        # warn = self.query('SELECT * FROM events WHERE "EventDescription" = \'%s\'' % (desc))
+        # warn = self.query('SELECT * FROM events WHERE
+        # "EventDescription" = \'%s\'' % (desc))
         # if warn.ntuples() == 0:
 
         self.insert('events', EventType=type, EventDescription=desc)
@@ -291,8 +312,7 @@ def __del__(self):
 
 def _caller_str():
     # get the module calling to make clear how is logging this message
     frame = inspect.stack()[2]
-    line   = frame[2]
+    line = frame[2]
     caller = frame[3]
-
-    return '[%s:%s(%s)]\n' % (platform.node(), caller, str(line))
+    return '[%s:%s(%s)]\n' % (platform.node(), caller, str(line))

From 90c9c9e77f1d9b9f83761e697c2dc5f661657c7e Mon Sep 17 00:00:00 2001
From: Parick Smith
Date: Thu, 10 Oct 2024 14:27:50 -0400
Subject: [PATCH 2/7] pep8 compliance in dbConnection.py

---
 pgamit/dbConnection.py | 91 +++++++++++++++++++++++++++---------------
 1 file changed, 58 insertions(+), 33 deletions(-)

diff --git a/pgamit/dbConnection.py b/pgamit/dbConnection.py
index da09c56b..e22146ec 100644
--- a/pgamit/dbConnection.py
+++ b/pgamit/dbConnection.py
@@ -3,7 +3,8 @@
 Date: 02/16/2017
 Author: Demian D. Gomez
 
-This class is used to connect to the database and handles inserts, updates and selects
+This class is used to connect to the database
+and handles inserts, updates and selects.
It also handles the error, info and warning messages """ @@ -38,7 +39,9 @@ def cast_array_to_float(recordset): new_record = [] for field in record: if isinstance(field, list): - new_record.append([float(value) if isinstance(value, Decimal) else value for value in field]) + new_record.append( + [float(value) if isinstance(value, Decimal) + else value for value in field]) else: if isinstance(field, Decimal): new_record.append(float(field)) @@ -54,7 +57,8 @@ def cast_array_to_float(recordset): for key, value in record.items(): if isinstance(value, Decimal): record[key] = float(value) - elif isinstance(value, list) and all(isinstance(i, Decimal) for i in value): + elif (isinstance(value, list) + and all(isinstance(i, Decimal) for i in value)): record[key] = [float(i) for i in value] return recordset @@ -91,19 +95,24 @@ def debug(s): file_append('/tmp/db.log', "DB: %s\n" % s) -class dbErrInsert (Exception): pass +class dbErrInsert (Exception): + pass -class dbErrUpdate (Exception): pass +class dbErrUpdate (Exception): + pass -class dbErrConnect(Exception): pass +class dbErrConnect(Exception): + pass -class dbErrDelete (Exception): pass +class dbErrDelete (Exception): + pass -class DatabaseError(psycopg2.DatabaseError): pass +class DatabaseError(psycopg2.DatabaseError): + pass class Cnn(object): @@ -116,8 +125,8 @@ def __init__(self, configfile, use_float=False, write_cfg_file=False): 'database': DB_NAME} self.active_transaction = False - self.options = options - + self.options = options + # parse session config file config = configparser.ConfigParser() @@ -126,7 +135,8 @@ def __init__(self, configfile, use_float=False, write_cfg_file=False): except FileNotFoundError: if write_cfg_file: create_empty_cfg() - print(' >> No gnss_data.cfg file found, an empty one has been created. Replace all the necessary ' + print(' >> No gnss_data.cfg file found, an empty one ' + 'has been created. 
Replace all the necessary ' 'config and try again.') exit(1) else: @@ -143,9 +153,11 @@ def __init__(self, configfile, use_float=False, write_cfg_file=False): # Define the custom type for an array of decimals DECIMAL_ARRAY_TYPE = psycopg2.extensions.new_type( - (psycopg2.extensions.DECIMAL.values,), # This matches the type codes for DECIMAL + (psycopg2.extensions.DECIMAL.values,), + # This matches the type codes for DECIMAL 'DECIMAL_ARRAY', # Name of the type - lambda value, curs: [float(d) for d in value] if value is not None else None + lambda value, curs: + [float(d) for d in value] if value is not None else None ) psycopg2.extensions.register_type(DEC2FLOAT) @@ -155,11 +167,14 @@ def __init__(self, configfile, use_float=False, write_cfg_file=False): err = None for i in range(3): try: - self.cnn = psycopg2.connect(host=options['hostname'], user=options['username'], - password=options['password'], dbname=options['database']) + self.cnn = psycopg2.connect(host=options['hostname'], + user=options['username'], + password=options['password'], + dbname=options['database']) self.cnn.autocommit = True - self.cursor = self.cnn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) + self.cursor = self.cnn.cursor( + cursor_factory=psycopg2.extras.RealDictCursor) debug("Database connection established") @@ -179,8 +194,8 @@ def query(self, command): return query_obj(self.cursor) def query_float(self, command, as_dict=False): - # deprecated: using psycopg2 now solves the problem of returning float numbers - # still in to maintain backwards compatibility + # deprecated: using psycopg2 now solves the problem of returning float + # numbers still in to maintain backwards compatibility if not as_dict: cursor = self.cnn.cursor() @@ -195,18 +210,23 @@ def query_float(self, command, as_dict=False): def get(self, table, filter_fields, return_fields): """ - Selects from the given table the records that match filter_fields and returns ONE dictionary. - Method should not be used to retrieve more than one single record. + Selects from the given table the records that match filter_fields and + returns ONE dictionary. Method should not be used to retrieve more + than one single record. + Parameters: table (str): The table to select from. - filter_fields (dict): The dictionary where the keys are the field names and the values are the filter values. + filter_fields (dict): The dictionary where the keys are the field + names and the values are the filter values. return_fields (list of str): The fields to return. Returns: - list: A list of dictionaries, each representing a record that matches the filter. + list: A list of dictionaries, each representing a record that + matches the filter. 
""" - where_clause = ' AND '.join([f'"{key}" = %s' for key in filter_fields.keys()]) + where_clause = ' AND '.join([f'"{key}" = %s' + for key in filter_fields.keys()]) fields_clause = ', '.join([f'"{field}"' for field in return_fields]) query = f'SELECT {fields_clause} FROM {table} WHERE {where_clause}' values = list(filter_fields.values()) @@ -225,7 +245,9 @@ def get(self, table, filter_fields, return_fields): raise e def get_columns(self, table): - tblinfo = self.query('select column_name, data_type from information_schema.columns where table_name=\'%s\'' + tblinfo = self.query(('select column_name, data_type from' + 'information_schema.columns where ' + 'table_name=\'%s\'') % table).dictresult() return {field['column_name']: field['data_type'] for field in tblinfo} @@ -262,13 +284,15 @@ def insert(self, table, **kw): def update(self, table, row, **kwargs): """ - Updates the specified table with new field values. The row(s) are updated based on the primary key(s) - indicated in the 'row' dictionary. New values are specified in kwargs. Field names must be enclosed - with double quotes to handle camel case names. + Updates the specified table with new field values. The row(s) are + updated based on the primary key(s) indicated in the 'row' dictionary. + New values are specified in kwargs. Field names must be enclosed with + double quotes to handle camel case names. Parameters: table (str): The table to update. - row (dict): The dictionary where the keys are the primary key fields and the values are the row's identifiers. + row (dict): The dictionary where the keys are the primary key fields + and the values are the row's identifiers. kwargs: New field values for the row. """ # Build the SET clause of the query @@ -293,7 +317,8 @@ def update(self, table, row, **kwargs): def delete(self, table, **kw): """ - Deletes row(s) from the specified table based on the provided keyword arguments. + Deletes row(s) from the specified table based on the provided + keyword arguments. Parameters: table (str): The table to delete from. @@ -332,7 +357,8 @@ def insert_event_bak(self, type, module, desc): desc = re.sub(r'BASH.*', '', desc) desc = re.sub(r'PSQL.*', '', desc) - # warn = self.query('SELECT * FROM events WHERE "EventDescription" = \'%s\'' % (desc)) + # warn = self.query('SELECT * FROM events WHERE + # "EventDescription" = \'%s\'' % (desc)) # if warn.ntuples() == 0: self.insert('events', EventType=type, EventDescription=desc) @@ -354,8 +380,7 @@ def __del__(self): def _caller_str(): # get the module calling to make clear how is logging this message frame = inspect.stack()[2] - line = frame[2] + line = frame[2] caller = frame[3] - - return '[%s:%s(%s)]\n' % (platform.node(), caller, str(line)) + return '[%s:%s(%s)]\n' % (platform.node(), caller, str(line)) From dfad6fb27a27339118dc5e6b42b95013c5acf55b Mon Sep 17 00:00:00 2001 From: Parick Smith Date: Thu, 10 Oct 2024 17:15:34 -0400 Subject: [PATCH 3/7] pep8 compliance in pyArchiveStruct --- pgamit/pyArchiveStruct.py | 302 +++++++++++++++++++++++--------------- 1 file changed, 185 insertions(+), 117 deletions(-) diff --git a/pgamit/pyArchiveStruct.py b/pgamit/pyArchiveStruct.py index 011efe94..5b7bee78 100644 --- a/pgamit/pyArchiveStruct.py +++ b/pgamit/pyArchiveStruct.py @@ -3,25 +3,22 @@ Date: 02/16/2017 Author: Demian D. Gomez -This class handles the interface between the directory structure of the rinex archive and the databased records. 
-It can be used to retrieve a rinex path based on a rinex database record -It can also scan the dirs of a supplied path for d.Z and station.info files (the directories and files have to match the +This class handles the interface between the directory structure of the rinex +archive and the databased records. It can be used to retrieve a rinex path +based on a rinex database record. It can also scan the dirs of a supplied path +for d.Z and station.info files (the directories and files have to match the declared directory structure and {stmn}{doy}{session}.{year}d.Z, respectively) """ import os -import sys -import re # deps import scandir # app -from pgamit import pyDate from pgamit import pyOptions from pgamit import pyEvents from pgamit import Utils -from pgamit import pyRinex from pgamit import pyRinexName from pgamit.pyRinexName import RinexNameFormat from pgamit.Utils import file_try_remove @@ -35,11 +32,13 @@ def __init__(self, cnn): self.archiveroot = None # read the structure definition table - self.levels = cnn.query('SELECT rinex_tank_struct.*, keys.* FROM rinex_tank_struct ' - 'LEFT JOIN keys ON keys."KeyCode" = rinex_tank_struct."KeyCode" ' + self.levels = cnn.query('SELECT rinex_tank_struct.*, keys.* FROM ' + 'rinex_tank_struct ' + 'LEFT JOIN keys ON keys."KeyCode" = ' + 'rinex_tank_struct."KeyCode" ' 'ORDER BY "Level"').dictresult() - self.keys = cnn.query('SELECT * FROM keys').dictresult() + self.keys = cnn.query('SELECT * FROM keys').dictresult() # read the station and network tables self.networks = cnn.query('SELECT * FROM networks').dictresult() self.stations = cnn.query('SELECT * FROM stations').dictresult() @@ -48,35 +47,42 @@ def __init__(self, cnn): def insert_rinex(self, record=None, rinexobj=None): """ - Insert a RINEX record and file into the database and archive. If only record is provided, only insert into db - If only rinexobj is provided, then RinexRecord of rinexobj is used for the insert. If both are given, then - RinexRecord overrides the passed record. + Insert a RINEX record and file into the database and archive. + If only record is provided, only insert into db. + If only rinexobj is provided, then RinexRecord of rinexobj + is used for the insert. + If both are given, then RinexRecord overrides the passed record. + :param record: a RinexRecord dictionary to make the insert to the db :param rinexobj: the pyRinex object containing the file being processed :param rnxaction: accion to perform to rinexobj. - :return: True if insertion was successful. False if no insertion was done. + :return: True if insertion was successful. + False if no insertion was done. 
""" if record is None and rinexobj is None: - raise ValueError('insert_rinex exception: both record and rinexobj cannot be None.') + raise ValueError(('insert_rinex exception: both record and ' + 'rinexobj cannot be None.')) if rinexobj is not None: record = rinexobj.record - copy_succeeded = False + copy_succeeded = False archived_crinex = '' # check if record exists in the database - if not self.get_rinex_record(NetworkCode = record['NetworkCode'], - StationCode = record['StationCode'], - ObservationYear = record['ObservationYear'], - ObservationDOY = record['ObservationDOY'], - Interval = record['Interval'], - Completion = float('%.3f' % record['Completion'])): + if not self.get_rinex_record(NetworkCode=record['NetworkCode'], + StationCode=record['StationCode'], + ObservationYear=record['ObservationYear'], + ObservationDOY=record['ObservationDOY'], + Interval=record['Interval'], + Completion=float('%.3f' + % record['Completion'])): # no record, proceed - # check if we need to perform any rinex operations. We might be inserting a new record, but it may just be - # a ScanRinex op where we don't copy the file into the archive + # check if we need to perform any rinex operations. We might be + # inserting a new record, but it may just be a ScanRinex op where + # we don't copy the file into the archive if rinexobj is not None: # is the rinex object correctly named? rinexobj.apply_file_naming_convention() @@ -92,21 +98,26 @@ def insert_rinex(self, record=None, rinexobj=None): # a rinexobj was passed, copy it into the archive. path2archive = os.path.join(self.Config.archive_path, - self.build_rinex_path(record['NetworkCode'], - record['StationCode'], - record['ObservationYear'], - record['ObservationDOY'], - with_filename = False, - rinexobj = rinexobj)) - - # copy fixed version into the archive (in case another session exists for RINEX v2) - archived_crinex = rinexobj.compress_local_copyto(path2archive) + self.build_rinex_path( + record['NetworkCode'], + record['StationCode'], + record['ObservationYear'], + record['ObservationDOY'], + with_filename=False, + rinexobj=rinexobj)) + + # copy fixed version into the archive (in case another + # session exists for RINEX v2) + archived_crinex = rinexobj.compress_local_copyto( + path2archive) copy_succeeded = True # get the rinex filename to update the database - rnx = RinexNameFormat(archived_crinex).to_rinex_format(pyRinexName.TYPE_RINEX, no_path=True) + rnx = RinexNameFormat(archived_crinex).to_rinex_format( + pyRinexName.TYPE_RINEX, no_path=True) if rnx != rinexobj.rinex: - # update the table with the filename (always force with step) + # update the table with the filename + # (always force with step) self.cnn.query('UPDATE rinex SET "Filename" = \'%s\' ' 'WHERE "NetworkCode" = \'%s\' ' 'AND "StationCode" = \'%s\' ' @@ -124,18 +135,23 @@ def insert_rinex(self, record=None, rinexobj=None): record['Completion'], record['Filename'])) - event = pyEvents.Event(Description = 'A new RINEX was added to the archive: %s' % record['Filename'], - NetworkCode = record['NetworkCode'], - StationCode = record['StationCode'], - Year = record['ObservationYear'], - DOY = record['ObservationDOY']) + event = pyEvents.Event( + Description=(('A new RINEX was ' + 'added to the archive: %s')) + % record['Filename'], + NetworkCode=record['NetworkCode'], + StationCode=record['StationCode'], + Year=record['ObservationYear'], + DOY=record['ObservationDOY']) else: - event = pyEvents.Event(Description = 'Archived CRINEX file %s added to the database.' 
% - record['Filename'], - NetworkCode = record['NetworkCode'], - StationCode = record['StationCode'], - Year = record['ObservationYear'], - DOY = record['ObservationDOY']) + event = pyEvents.Event( + Description=('Archived CRINEX file %s ' + 'added to the database.') + % record['Filename'], + NetworkCode=record['NetworkCode'], + StationCode=record['StationCode'], + Year=record['ObservationYear'], + DOY=record['ObservationDOY']) self.cnn.insert_event(event) @@ -143,7 +159,8 @@ def insert_rinex(self, record=None, rinexobj=None): self.cnn.rollback_transac() if rinexobj and copy_succeeded: - # transaction rolled back due to error. If file made into the archive, delete it. + # transaction rolled back due to error. If file made into + # the archive, delete it. os.remove(archived_crinex) raise @@ -163,35 +180,44 @@ def remove_rinex(self, record, move_to_dir=None): try: self.cnn.begin_transac() # propagate the deletes - # check if this rinex file is the file that was processed and used for solutions - where_station = '"NetworkCode" = \'%s\' AND "StationCode" = \'%s\'' % (record['NetworkCode'], record['StationCode']) + # check if this rinex file is the file that was processed and used + # for solutions + where_station = ('"NetworkCode" = \'%s\' ' + 'AND "StationCode" = \'%s\'') % ( + record['NetworkCode'], record['StationCode']) rs = self.cnn.query( - 'SELECT * FROM rinex_proc WHERE %s AND "ObservationYear" = %i AND "ObservationDOY" = %i' + ('SELECT * FROM rinex_proc WHERE %s ' + 'AND "ObservationYear" = %i AND "ObservationDOY" = %i') % (where_station, record['ObservationYear'], record['ObservationDOY'])) if rs.ntuples() > 0: self.cnn.query( - 'DELETE FROM gamit_soln WHERE %s AND "Year" = %i AND "DOY" = %i' + ('DELETE FROM gamit_soln WHERE %s ' + 'AND "Year" = %i AND "DOY" = %i') % (where_station, record['ObservationYear'], record['ObservationDOY'])) self.cnn.query( - 'DELETE FROM ppp_soln WHERE %s AND "Year" = %i AND "DOY" = %i' + ('DELETE FROM ppp_soln WHERE %s ' + 'AND "Year" = %i AND "DOY" = %i') % (where_station, record['ObservationYear'], record['ObservationDOY'])) # get the filename - rinex_path = self.build_rinex_path(record['NetworkCode'], record['StationCode'], - record['ObservationYear'], record['ObservationDOY'], + rinex_path = self.build_rinex_path(record['NetworkCode'], + record['StationCode'], + record['ObservationYear'], + record['ObservationDOY'], filename=record['Filename']) rinex_path = os.path.join(self.Config.archive_path, rinex_path) # delete the rinex record self.cnn.query( - 'DELETE FROM rinex WHERE %s AND "ObservationYear" = %i AND "ObservationDOY" = %i AND "Filename" = \'%s\'' + ('DELETE FROM rinex WHERE %s AND "ObservationYear" = %i AND ' + '"ObservationDOY" = %i AND "Filename" = \'%s\'') % (where_station, record['ObservationYear'], record['ObservationDOY'], record['Filename'])) @@ -200,26 +226,35 @@ def remove_rinex(self, record, move_to_dir=None): if move_to_dir: filename = Utils.move(rinex_path, - os.path.join(move_to_dir, os.path.basename(rinex_path))) - description = 'RINEX %s was removed from the database and archive. ' \ - 'File moved to %s. See next events for reason.' % (record['Filename'], filename) + os.path.join( + move_to_dir, + os.path.basename(rinex_path))) + description = ('RINEX %s was removed from the database ' + 'and archive. ' + 'File moved to %s. See next events ' + 'for reason.') % (record['Filename'], + filename) else: os.remove(rinex_path) - description = 'RINEX %s was removed from the database and archive. ' \ - 'File was deleted. 
See next events for reason.' % (record['Filename'])
+                    description = ('RINEX %s was removed from the database '
+                                   'and archive. '
+                                   'File was deleted. See next events '
+                                   'for reason.') % (record['Filename'])
             else:
-                description = 'RINEX %s was removed from the database and archive. File was NOT found in the archive ' \
-                              'so no deletion was performed. See next events for reason.' % (record['Filename'])
+                description = ('RINEX %s was removed from the database and '
+                               'archive. File was NOT found in the archive '
+                               'so no deletion was performed. See next events '
+                               'for reason.') % (record['Filename'])
 
             # insert an event
-            event = pyEvents.Event(Description = description,
-                                   NetworkCode = record['NetworkCode'],
-                                   StationCode = record['StationCode'],
-                                   EventType   = 'info',
-                                   Year        = record['ObservationYear'],
-                                   DOY         = record['ObservationDOY'])
+            event = pyEvents.Event(Description=description,
+                                   NetworkCode=record['NetworkCode'],
+                                   StationCode=record['StationCode'],
+                                   EventType='info',
+                                   Year=record['ObservationYear'],
+                                   DOY=record['ObservationDOY'])
 
             self.cnn.insert_event(event)
 
@@ -230,11 +265,15 @@ def remove_rinex(self, record, move_to_dir=None):
 
     def get_rinex_record(self, **kwargs):
         """
-        Retrieve a single or multiple records from the rinex table given a set parameters. If parameters are left empty,
-        it wil return all records matching the specified criteria. Each parameter acts like a filter, narrowing down the
-        records returned by the function. The default behavior is to use tables rinex or rinex_proc depending on the
-        provided parameters. E.g. if Interval, Completion and Filename are all left blank, the function will return the
-        records using rinex_proc. Otherwise, the rinex table will be used.
+        Retrieve a single or multiple records from the rinex table given a
+        set of parameters. If parameters are left empty, it will return all
+        records matching the specified criteria. Each parameter acts like a
+        filter, narrowing down the records returned by the function. The
+        default behavior is to use tables rinex or rinex_proc depending on the
+        provided parameters. E.g. if Interval, Completion and Filename are all
+        left blank, the function will return the records using rinex_proc.
+        Otherwise, the rinex table will be used.
+ :param NetworkCode: filter :param StationCode: filter :param ObservationYear: filter @@ -245,7 +284,8 @@ def get_rinex_record(self, **kwargs): :return: a dictionary will the records matching the provided parameters """ - if any(param in ('Interval', 'Completion', 'Filename') for param in kwargs.keys()): + if any(param in ('Interval', 'Completion', + 'Filename') for param in kwargs.keys()): table = 'rinex' else: table = 'rinex_proc' @@ -258,7 +298,8 @@ def get_rinex_record(self, **kwargs): for key in kwargs: if key not in fields.keys(): - raise ValueError('Parameter ' + key + ' is not a field in table ' + table) + raise ValueError('Parameter ' + key + + ' is not a field in table ' + table) elif key != 'ObservationFYear': # avoid FYear due to round off problems @@ -279,15 +320,15 @@ def get_rinex_record(self, **kwargs): def scan_archive_struct(self, rootdir, progress_bar=None): self.archiveroot = rootdir - rnx = [] + rnx = [] path2rnx = [] - fls = [] + fls = [] for path, _, files in scandir.walk(rootdir): for file in files: file_path = os.path.join(path, file) - crinex = file_path.rsplit(rootdir + '/')[1] + crinex = file_path.rsplit(rootdir + '/')[1] if progress_bar is not None: - progress_bar.set_postfix(crinex = crinex) + progress_bar.set_postfix(crinex=crinex) progress_bar.update() try: @@ -309,7 +350,7 @@ def scan_archive_struct_stninfo(self, rootdir): # same as scan archive struct but looks for station info files self.archiveroot = rootdir - stninfo = [] + stninfo = [] path2stninfo = [] for path, dirs, files in scandir.walk(rootdir): for file in files: @@ -324,21 +365,28 @@ def scan_archive_struct_stninfo(self, rootdir): return stninfo, path2stninfo - def build_rinex_path(self, NetworkCode, StationCode, ObservationYear, ObservationDOY, - with_filename=True, filename=None, rinexobj=None): + def build_rinex_path(self, NetworkCode, StationCode, ObservationYear, + ObservationDOY, with_filename=True, filename=None, + rinexobj=None): """ - Function to get the location in the archive of a rinex file. It has two modes of operation: - 1) retrieve an existing rinex file, either specific or the rinex for processing - (most complete, largest interval) or a specific rinex file (already existing in the rinex table). - 2) To get the location of a potential file (probably used for injecting a new file in the archive. No this mode, - filename has no effect. + Function to get the location in the archive of a rinex file. It has + two modes of operation: + 1) retrieve an existing rinex file, either specific or the rinex for + processing (most complete, largest interval) or a specific rinex + file (already existing in the rinex table). + 2) To get the location of a potential file (probably used for + injecting a new file in the archive. No this mode, filename has + no effect. + :param NetworkCode: NetworkCode of the station being retrieved :param StationCode: StationCode of the station being retrieved :param ObservationYear: Year of the rinex file being retrieved :param ObservationDOY: DOY of the rinex file being retrieved - :param with_filename: if set, returns a path including the filename. Otherwise, just returns the path + :param with_filename: if set, returns a path including the filename. + Otherwise, just returns the path :param filename: name of a specific file to search in the rinex table - :param rinexobj: a pyRinex object to pull the information from (to fill the achive keys). + :param rinexobj: a pyRinex object to pull the information from + (to fill the achive keys). 
:return: a path with or without filename """ if not rinexobj: @@ -348,31 +396,44 @@ def build_rinex_path(self, NetworkCode, StationCode, ObservationYear, Observatio for level in self.levels] + ['"Filename"']) if filename: - filename = RinexNameFormat(filename).to_rinex_format(pyRinexName.TYPE_RINEX) - - # if filename is set, user requesting a specific file: query rinex table - rs = self.cnn.query('SELECT ' + sql_string + ' FROM rinex WHERE "NetworkCode" = \'' + - NetworkCode + '\' AND "StationCode" = \'' + StationCode + - '\' AND "ObservationYear" = ' + str(ObservationYear) + ' AND "ObservationDOY" = ' + - str(ObservationDOY) + ' AND "Filename" = \'' + filename + '\'') + filename = RinexNameFormat(filename).to_rinex_format( + pyRinexName.TYPE_RINEX) + + # if filename is set, user requesting a specific file: + # query rinex table + rs = self.cnn.query('SELECT ' + sql_string + + ' FROM rinex WHERE "NetworkCode" = \'' + + NetworkCode + + '\' AND "StationCode" = \'' + StationCode + + '\' AND "ObservationYear" = ' + + str(ObservationYear) + + ' AND "ObservationDOY" = ' + + str(ObservationDOY) + + ' AND "Filename" = \'' + filename + '\'') else: - # if filename is NOT set, user requesting a the processing file: query rinex_proc + # if filename is NOT set, user requesting a the processing + # file: query rinex_proc rs = self.cnn.query( - 'SELECT ' + sql_string + ' FROM rinex_proc WHERE "NetworkCode" = \'' + NetworkCode + - '\' AND "StationCode" = \'' + StationCode + '\' AND "ObservationYear" = ' + str( - ObservationYear) + ' AND "ObservationDOY" = ' + str(ObservationDOY)) + 'SELECT ' + sql_string + + ' FROM rinex_proc WHERE "NetworkCode" = \'' + NetworkCode + + '\' AND "StationCode" = \'' + StationCode + + '\' AND "ObservationYear" = ' + str( + ObservationYear) + ' AND "ObservationDOY" = ' + + str(ObservationDOY)) if not rs.ntuples(): return None field = rs.dictresult()[0] - path = "/".join(str(field[level['rinex_col_in']]).zfill(level['TotalChars']) - for level in self.levels) + path = "/".join( + str(field[level['rinex_col_in']]).zfill(level['TotalChars']) + for level in self.levels) if with_filename: rnx_name = RinexNameFormat(field['Filename']) # database stores rinex, we want crinez - return path + "/" + rnx_name.to_rinex_format(pyRinexName.TYPE_CRINEZ) + return path + "/" + rnx_name.to_rinex_format( + pyRinexName.TYPE_CRINEZ) else: return path @@ -385,14 +446,17 @@ def build_rinex_path(self, NetworkCode, StationCode, ObservationYear, Observatio kk = kk.zfill(level['TotalChars']) if len(kk) != level['TotalChars']: - raise ValueError('Invalid record \'%s\' for key \'%s\'' % (kk, level['KeyCode'])) + raise ValueError('Invalid record \'%s\' for key \'%s\'' % + (kk, level['KeyCode'])) keys += [kk] path = '/'.join(keys) crinez_path = os.path.join(path, rinexobj.crinez) - valid, _ = self.parse_archive_keys(crinez_path, - tuple(item['KeyCode'] for item in self.levels)) + valid, _ = self.parse_archive_keys(crinez_path, + tuple(item[ + 'KeyCode'] for item in + self.levels)) if not valid: raise ValueError('Invalid path result: %s' % path) @@ -403,9 +467,12 @@ def build_rinex_path(self, NetworkCode, StationCode, ObservationYear, Observatio def parse_archive_keys(self, path_filename, key_filter=()): """ - based on a path and filename, this function parses the data and organizes the information in a dictionary - key_filter allows to select which keys you want to get a hold on. 
The order of the keys in the path is given - by the database table rinex_tank_struct + based on a path and filename, this function parses the data and + organizes the information in a dictionary + key_filter allows to select which keys you want to get a hold on. + The order of the keys in the path is given by the database table + rinex_tank_struct + :param path: :param key_filter: :return: @@ -413,14 +480,16 @@ def parse_archive_keys(self, path_filename, key_filter=()): keys_out = {} try: - path = os.path.dirname(path_filename).split('/') + path = os.path.dirname(path_filename).split('/') filename = os.path.basename(path_filename) - # check the number of levels in path parts against the number of expected levels + # check the number of levels in path parts against + # the number of expected levels if len(path) != len(self.levels): return False, {} - # now look in the different levels to match more data (or replace filename keys) + # now look in the different levels to match more data + # (or replace filename keys) for key in self.levels: path_l = path[key['Level'] - 1] if len(path_l) != key['TotalChars']: @@ -434,18 +503,17 @@ def parse_archive_keys(self, path_filename, key_filter=()): # fill in all the possible keys_out using the crinex file info keys_out['station'] = fileparts.StationCode - keys_out['doy'] = fileparts.date.doy + keys_out['doy'] = fileparts.date.doy keys_out['session'] = fileparts.session - keys_out['year'] = fileparts.date.year + keys_out['year'] = fileparts.date.year # check date is valid and also fill day and month keys_out - keys_out['day'] = fileparts.date.day + keys_out['day'] = fileparts.date.day keys_out['month'] = fileparts.date.month - return True, {key: keys_out[key] + return True, {key: keys_out[key] for key in keys_out.keys() if key in key_filter} except: return False, {} - From e79c58141942b67b075b501566500f43e11cad47 Mon Sep 17 00:00:00 2001 From: Parick Smith Date: Sun, 13 Oct 2024 21:23:46 -0400 Subject: [PATCH 4/7] pep8 compliance pyBunch --- pgamit/pyBunch.py | 65 +++++++++++++++++++++++++---------------------- 1 file changed, 34 insertions(+), 31 deletions(-) diff --git a/pgamit/pyBunch.py b/pgamit/pyBunch.py index b7a30fa8..a7b99e37 100644 --- a/pgamit/pyBunch.py +++ b/pgamit/pyBunch.py @@ -45,7 +45,8 @@ class Bunch(dict): >>> b.foo is b['foo'] True - A Bunch is a subclass of dict; it supports all the methods a dict does... + A Bunch is a subclass of dict; + it supports all the methods a dict does... >>> sorted(b.keys()) ['foo', 'hello'] @@ -63,10 +64,12 @@ class Bunch(dict): And "splats". - >>> "The {knights} who say {ni}!".format(**Bunch(knights='lolcats', ni='can haz')) + >>> "The {knights} who say {ni}!".format(**Bunch(knights='lolcats', + ni='can haz')) 'The lolcats who say can haz!' - See unbunchify/Bunch.toDict, bunchify/Bunch.fromDict for notes about conversion. + See unbunchify/Bunch.toDict, bunchify/Bunch.fromDict for notes about + conversion. """ def __contains__(self, k): @@ -97,7 +100,8 @@ def __contains__(self, k): def __getattr__(self, k): """ Gets key if it exists, otherwise throws AttributeError. - nb. __getattr__ is only called if key is not found in normal places. + nb. __getattr__ is only called if key is not found in normal + places. >>> b = Bunch(bar='baz', lol={}) >>> b.foo @@ -154,9 +158,9 @@ def __setattr__(self, k, v): object.__setattr__(self, k, v) def __delattr__(self, k): - """ Deletes attribute k if it exists, otherwise deletes key k. 
A KeyError - raised by deleting the key--such as when the key is missing--will - propagate as an AttributeError instead. + """ Deletes attribute k if it exists, otherwise deletes key k. A + KeyError raised by deleting the key--such as when the key is + missing--will propagate as an AttributeError instead. >>> b = Bunch(lol=42) >>> del b.values @@ -200,7 +204,8 @@ def __repr__(self): >>> eval(repr(b)) Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!') - (*) Invertible so long as collection contents are each repr-invertible. + (*) Invertible so long as collection contents are each + repr-invertible. """ args = ', '.join('%s=%r' % (key, self[key]) for key in sorted(self.keys())) @@ -243,7 +248,8 @@ def bunchify(x): >>> b.lol[1].hah 'i win again' - nb. As dicts are not hashable, they cannot be nested in sets/frozensets. + nb. As dicts are not hashable, they cannot be nested in + sets/frozensets. """ if isinstance(x, dict): return Bunch((k, bunchify(v)) for k, v in x.items()) @@ -269,7 +275,8 @@ def unbunchify(x): {'ponies': ('are pretty!', {'lies': 'are trouble!'}), 'foo': ['bar', {'lol': True}], 'hello': 42} - nb. As dicts are not hashable, they cannot be nested in sets/frozensets. + nb. As dicts are not hashable, they cannot be nested in + sets/frozensets. """ if isinstance(x, dict): return dict((k, unbunchify(v)) for k, v in x.items()) @@ -279,7 +286,7 @@ def unbunchify(x): return x -### Serialization +# Serialization try: try: @@ -287,9 +294,9 @@ def unbunchify(x): except ImportError: import simplejson as json - def toJSON(self, **options): - """ Serializes this Bunch to JSON. Accepts the same keyword options as `json.dumps()`. + """ Serializes this Bunch to JSON. Accepts the same keyword options + as `json.dumps()`. >>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!') >>> json.dumps(b) @@ -299,7 +306,6 @@ def toJSON(self, **options): """ return json.dumps(self, **options) - Bunch.toJSON = toJSON except ImportError: @@ -310,31 +316,33 @@ def toJSON(self, **options): import yaml from yaml.representer import Representer, SafeRepresenter - def from_yaml(loader, node): - """ PyYAML support for Bunches using the tag `!bunch` and `!bunch.Bunch`. + """ PyYAML support for Bunches using the tag `!bunch` and + `!bunch.Bunch`. >>> import yaml >>> yaml.load(''' - ... Flow style: !bunch.Bunch { Clark: Evans, Brian: Ingerson, Oren: Ben-Kiki } + ... Flow style: !bunch.Bunch { Clark: Evans, Brian: Ingerson, + Oren: Ben-Kiki } ... Block style: !bunch ... Clark : Evans ... Brian : Ingerson ... Oren : Ben-Kiki ... ''') #doctest: +NORMALIZE_WHITESPACE - {'Flow style': Bunch(Brian='Ingerson', Clark='Evans', Oren='Ben-Kiki'), - 'Block style': Bunch(Brian='Ingerson', Clark='Evans', Oren='Ben-Kiki')} - - This module registers itself automatically to cover both Bunch and any - subclasses. Should you want to customize the representation of a subclass, - simply register it with PyYAML yourself. + {'Flow style': Bunch(Brian='Ingerson', Clark='Evans', + Oren='Ben-Kiki'), + 'Block style': Bunch(Brian='Ingerson', Clark='Evans', + Oren='Ben-Kiki')} + + This module registers itself automatically to cover both Bunch and + any subclasses. Should you want to customize the representation of + a subclass, simply register it with PyYAML yourself. """ data = Bunch() yield data value = loader.construct_mapping(node) data.update(value) - def to_yaml_safe(dumper, data): """ Converts Bunch to a normal mapping node, making it appear as a dict in the YAML output. 
@@ -346,7 +354,6 @@ def to_yaml_safe(dumper, data): """ return dumper.represent_dict(data) - def to_yaml(dumper, data): """ Converts Bunch to a representation node. @@ -357,7 +364,6 @@ def to_yaml(dumper, data): """ return dumper.represent_mapping('!bunch.Bunch', data) - yaml.add_constructor('!bunch', from_yaml) yaml.add_constructor('!bunch.Bunch', from_yaml) @@ -367,11 +373,10 @@ def to_yaml(dumper, data): Representer.add_representer(Bunch, to_yaml) Representer.add_multi_representer(Bunch, to_yaml) - # Instance methods for YAML conversion def toYAML(self, **options): - """ Serializes this Bunch to YAML, using `yaml.safe_dump()` if - no `Dumper` is provided. See the PyYAML documentation for more info. + """ Serializes this Bunch to YAML, using `yaml.safe_dump()` if no + `Dumper` is provided. See the PyYAML documentation for more info. >>> b = Bunch(foo=['bar', Bunch(lol=True)], hello=42) >>> import yaml @@ -391,11 +396,9 @@ def toYAML(self, **options): else: return yaml.dump(self, **opts) - def fromYAML(*args, **kwargs): return bunchify(yaml.load(*args, **kwargs)) - Bunch.toYAML = toYAML Bunch.fromYAML = staticmethod(fromYAML) From 619a39cee1019c0c1fe361b774814628533a4dc4 Mon Sep 17 00:00:00 2001 From: Parick Smith Date: Sun, 13 Oct 2024 21:24:43 -0400 Subject: [PATCH 5/7] pep8 pyarchivestruct --- pgamit/pyArchiveStruct.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pgamit/pyArchiveStruct.py b/pgamit/pyArchiveStruct.py index 5b7bee78..4a0138da 100644 --- a/pgamit/pyArchiveStruct.py +++ b/pgamit/pyArchiveStruct.py @@ -385,7 +385,7 @@ def build_rinex_path(self, NetworkCode, StationCode, ObservationYear, :param with_filename: if set, returns a path including the filename. Otherwise, just returns the path :param filename: name of a specific file to search in the rinex table - :param rinexobj: a pyRinex object to pull the information from + :param rinexobj: a pyRinex object to pull the information from (to fill the achive keys). :return: a path with or without filename """ @@ -411,7 +411,7 @@ def build_rinex_path(self, NetworkCode, StationCode, ObservationYear, str(ObservationDOY) + ' AND "Filename" = \'' + filename + '\'') else: - # if filename is NOT set, user requesting a the processing + # if filename is NOT set, user requesting a the processing # file: query rinex_proc rs = self.cnn.query( 'SELECT ' + sql_string + @@ -495,7 +495,8 @@ def parse_archive_keys(self, path_filename, key_filter=()): if len(path_l) != key['TotalChars']: return False, {} - keys_out[key['KeyCode']] = int(path_l) if key['isnumeric'] == '1' else path_l.lower() + keys_out[key['KeyCode']] = int(path_l) if key[ + 'isnumeric'] == '1' else path_l.lower() if not filename.endswith('.info'): From d89fcaac9276d9546bdf5cb1fde065ee7102f88b Mon Sep 17 00:00:00 2001 From: Parick Smith Date: Sun, 13 Oct 2024 21:39:21 -0400 Subject: [PATCH 6/7] pep8 pydate --- pgamit/pyDate.py | 237 +++++++++++++++++++++++++++-------------------- 1 file changed, 139 insertions(+), 98 deletions(-) diff --git a/pgamit/pyDate.py b/pgamit/pyDate.py index 32e68c4a..ca89b9cc 100644 --- a/pgamit/pyDate.py +++ b/pgamit/pyDate.py @@ -4,7 +4,8 @@ Author: Abel Brown Modified by: Demian D. 
Gomez -Class that handles all the date conversions betweem different systems and formats +Class that handles all the date conversions between +different systems and formats """ @@ -29,11 +30,11 @@ def __str__(self): return str(self.value) -def yeardoy2fyear(year,doy,hour=12,minute=0,second=0): +def yeardoy2fyear(year, doy, hour=12, minute=0, second=0): # parse to integers (defensive) year = int(year) - doy = int(doy) + doy = int(doy) hour = int(hour) # default number of days in a year @@ -48,7 +49,8 @@ def yeardoy2fyear(year,doy,hour=12,minute=0,second=0): raise pyDateException('invalid day of year') # compute the fractional year - fractionalYear = year + ((doy-1) + hour/24. + minute/1440. + second/86400.)/diy + fractionalYear = year + ((doy-1) + hour/24. + + minute/1440. + second/86400.)/diy # that's all ... return fractionalYear @@ -64,24 +66,24 @@ def fyear2yeardoy(fyear): else: days = 365 - doy = floor(days*fractionOfyear)+1 - hh = (days*fractionOfyear - floor(days*fractionOfyear))*24. - hour = floor(hh) - mm = (hh - floor(hh))*60. + doy = floor(days*fractionOfyear)+1 + hh = (days*fractionOfyear - floor(days*fractionOfyear))*24. + hour = floor(hh) + mm = (hh - floor(hh))*60. minute = floor(mm) - ss = (mm - floor(mm))*60. + ss = (mm - floor(mm))*60. second = floor(ss) - return int(year),int(doy), int(hour), int(minute), int(second) + return int(year), int(doy), int(hour), int(minute), int(second) -def date2doy(year,month,day,hour=12,minute=0,second=0): +def date2doy(year, month, day, hour=12, minute=0, second=0): # parse to integers (defensive) - year = int(year) + year = int(year) month = int(month) - day = int(day) - hour = int(hour) + day = int(day) + hour = int(hour) # localized days of year if year % 4 == 0: @@ -93,17 +95,17 @@ def date2doy(year,month,day,hour=12,minute=0,second=0): doy = lday[month - 1]+day # finally, compute fractional year - fyear = yeardoy2fyear(year,doy,hour,minute,second) + fyear = yeardoy2fyear(year, doy, hour, minute, second) # that's a [w]rap return doy, fyear -def doy2date(year,doy): +def doy2date(year, doy): # parsem up to integers year = int(year) - doy = int(doy) + doy = int(doy) # make note of leap year or not isLeapYear = (year % 4 == 0) @@ -111,11 +113,12 @@ def doy2date(year,doy): # make note of valid doy for year mxd = 365 if isLeapYear: - mxd +=1 + mxd += 1 # check doy based on year if doy < 1 or doy > mxd: - raise pyDateException('day of year input is invalid (year=%i doy=%i)' % (year, doy)) + raise pyDateException( + 'day of year input is invalid (year=%i doy=%i)' % (year, doy)) # localized days if isLeapYear: @@ -126,48 +129,48 @@ def doy2date(year,doy): lday = (31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) # compute the month - for i in range(0,12): + for i in range(0, 12): if doy <= lday[i]: - #remember: zero based indexing! - month=i+1 + # remember: zero based indexing! + month = i+1 break # compute the day (dont forget zero based indexing) - day = doy - fday[month-1] +1 + day = doy - fday[month-1] + 1 - return month,day + return month, day -def date2gpsDate(year,month,day): +def date2gpsDate(year, month, day): - year = int(year) + year = int(year) month = int(month) - day = int(day) + day = int(day) if month <= 2: month += 12 - year -= 1 + year -= 1 - ut = (day % 1) *24. + ut = (day % 1) * 24. day = floor(day) - julianDay = ( floor( 365.25 * year ) - + floor( 30.6001 * ( month + 1. ) ) + julianDay = (floor(365.25 * year) + + floor(30.6001 * (month + 1.)) + day - + ut/24. + + ut/24. 
+ 1720981.5) - gpsWeek = floor((julianDay - 2444244.5)/7.) + gpsWeek = floor((julianDay - 2444244.5)/7.) gpsWeekDay = (julianDay - 2444244.5) % 7 # that's a [w]rap return int(gpsWeek), int(gpsWeekDay) -def gpsDate2mjd(gpsWeek,gpsWeekDay): +def gpsDate2mjd(gpsWeek, gpsWeekDay): # parse to integers - gpsWeek = int(gpsWeek) + gpsWeek = int(gpsWeek) gpsWeekDay = int(gpsWeekDay) mjd = (gpsWeek * 7.) + 44244. + gpsWeekDay @@ -191,11 +194,11 @@ def mjd2date(mjd): e = c - floor((1461. * d) / 4.) m = floor((5. * e + 2.) / 153.) - day = e - floor((153. * m + 2.) / 5.) + 1. + day = e - floor((153. * m + 2.) / 5.) + 1. month = m + 3. - 12. * floor(m / 10.) - year = b * 100. + d - 4800. + floor(m / 10.) + year = b * 100. + d - 4800. + floor(m / 10.) - return int(year),int(month),int(day) + return int(year), int(month), int(day) def parse_stninfo(stninfo_datetime): @@ -210,7 +213,8 @@ def parse_stninfo(stninfo_datetime): if int(sdate[0]) == 9999: return None, None, None, None, None else: - return int(sdate[0]), int(sdate[1]), int(sdate[2]), int(sdate[3]), int(sdate[4]) + return int(sdate[0]), int(sdate[1]), int(sdate[2]), int(sdate[3]), \ + int(sdate[4]) class Date(object): @@ -218,17 +222,19 @@ class Date(object): def __init__(self, **kwargs): # init - self.mjd = None - self.fyear = None - self.year = None - self.doy = None - self.day = None - self.month = None - self.gpsWeek = None + self.mjd = None + self.fyear = None + self.year = None + self.doy = None + self.day = None + self.month = None + self.gpsWeek = None self.gpsWeekDay = None - self.hour = 12 # DDG 03-28-2017: include hour and minute to work with station info object - self.minute = 0 - self.second = 0 + self.hour = 12 + # DDG 03-28-2017: + # include hour and minute to work with station info object + self.minute = 0 + self.second = 0 self.from_stninfo = False @@ -256,22 +262,30 @@ def __init__(self, **kwargs): self.gpsWeek = arg elif key == 'gpsweekday': self.gpsWeekDay = arg - elif key in ('fyear','fractionalyear','fracyear'): + elif key in ('fyear', 'fractionalyear', 'fracyear'): self.fyear = arg elif key == 'mjd': self.mjd = arg - elif key == 'hour': # DDG 03-28-2017: include hour to work with station info object + elif key == 'hour': + # DDG 03-28-2017: + # include hour to work with station info object self.hour = arg - elif key == 'minute': # DDG 03-28-2017: include minute to work with station info object + elif key == 'minute': + # DDG 03-28-2017: + # include minute to work with station info object self.minute = arg - elif key == 'second': # DDG 03-28-2017: include second to work with station info object + elif key == 'second': + # DDG 03-28-2017: + # include second to work with station info object self.second = arg - elif key == 'datetime': # DDG 03-28-2017: handle conversion from datetime to pyDate + elif key == 'datetime': + # DDG 03-28-2017: + # handle conversion from datetime to pyDate if isinstance(arg, datetime): - self.day = arg.day - self.month = arg.month - self.year = arg.year - self.hour = arg.hour + self.day = arg.day + self.month = arg.month + self.year = arg.year + self.hour = arg.hour self.minute = arg.minute self.second = arg.second else: @@ -281,59 +295,69 @@ def __init__(self, **kwargs): self.from_stninfo = True if isinstance(arg, str): - self.year, self.doy, self.hour, self.minute, self.second = parse_stninfo(arg) + self.year, self.doy, self.hour, self.minute, \ + self.second = parse_stninfo(arg) elif isinstance(arg, datetime) or isinstance(arg, Date): - self.day = arg.day - self.month = arg.month - self.year = 
arg.year - self.hour = arg.hour + self.day = arg.day + self.month = arg.month + self.year = arg.year + self.hour = arg.hour self.minute = arg.minute self.second = arg.second elif arg is None: - # ok to receive a None argument from the database due to 9999 999 00 00 00 records + # ok to receive a None argument from the database due to + # 9999 999 00 00 00 records break else: - raise pyDateException('invalid type %s for %s\n' % (str(type(arg)), key)) - + raise pyDateException('invalid type %s for %s\n' % + (str(type(arg)), key)) + else: raise pyDateException('unrecognized input arg: '+key+'\n') # make due with what we gots if self.year is not None and \ - self.doy is not None: + self.doy is not None: # compute the month and day of month self.month, self.day = doy2date(self.year, self.doy) # compute the fractional year - self.fyear = yeardoy2fyear(self.year, self.doy,self.hour, self.minute, self.second) + self.fyear = yeardoy2fyear(self.year, self.doy, self.hour, + self.minute, self.second) # compute the gps date - self.gpsWeek, self.gpsWeekDay = date2gpsDate(self.year, self.month, self.day) + self.gpsWeek, self.gpsWeekDay = date2gpsDate(self.year, + self.month, self.day) - self.mjd = gpsDate2mjd(self.gpsWeek,self.gpsWeekDay) + self.mjd = gpsDate2mjd(self.gpsWeek, self.gpsWeekDay) - elif self.gpsWeek is not None and \ + elif self.gpsWeek is not None and \ self.gpsWeekDay is not None: # initialize modified julian day from gps date self.mjd = gpsDate2mjd(self.gpsWeek, self.gpsWeekDay) # compute year, month, and day of month from modified julian day - self.year, self.month,self.day = mjd2date(self.mjd) + self.year, self.month, self.day = mjd2date(self.mjd) # compute day of year from month and day of month - self.doy, self.fyear = date2doy(self.year, self.month, self.day, self.hour, self.minute, self.second) + self.doy, self.fyear = date2doy(self.year, self.month, self.day, + self.hour, self.minute, + self.second) - elif self.year is not None and \ + elif self.year is not None and \ self.month is not None and \ self.day: # initialize day of year and fractional year from date - self.doy, self.fyear = date2doy(self.year, self.month, self.day, self.hour, self.minute, self.second) + self.doy, self.fyear = date2doy(self.year, self.month, self.day, + self.hour, self.minute, + self.second) # compute the gps date - self.gpsWeek, self.gpsWeekDay = date2gpsDate(self.year, self.month, self.day) + self.gpsWeek, self.gpsWeekDay = date2gpsDate(self.year, self.month, + self.day) # init the modified julian date self.mjd = gpsDate2mjd(self.gpsWeek, self.gpsWeekDay) @@ -341,14 +365,16 @@ def __init__(self, **kwargs): elif self.fyear is not None: # initialize year and day of year - self.year, self.doy, self.hour, self.minute, self.second = fyear2yeardoy(self.fyear) + self.year, self.doy, self.hour, self.minute, self.second = \ + fyear2yeardoy(self.fyear) # set the month and day of month # compute the month and day of month self.month, self.day = doy2date(self.year, self.doy) # compute the gps date - self.gpsWeek, self.gpsWeekDay = date2gpsDate(self.year, self.month, self.day) + self.gpsWeek, self.gpsWeekDay = date2gpsDate(self.year, self.month, + self.day) # finally, compute modified jumlian day self.mjd = gpsDate2mjd(self.gpsWeek, self.gpsWeekDay) @@ -359,16 +385,19 @@ def __init__(self, **kwargs): self.year, self.month, self.day = mjd2date(self.mjd) # compute day of year from month and day of month - self.doy, self.fyear = date2doy(self.year, self.month, self.day, self.hour, self.minute, self.second) + self.doy, 
self.fyear = date2doy(self.year, self.month, self.day, + self.hour, self.minute, + self.second) # compute the gps date - self.gpsWeek, self.gpsWeekDay = date2gpsDate(self.year, self.month, self.day) + self.gpsWeek, self.gpsWeekDay = date2gpsDate(self.year, self.month, + self.day) elif not self.from_stninfo: - # if empty Date object from a station info, it means that it should be printed as 9999 999 00 00 00 - raise pyDateException('not enough independent input args to compute full date') - - + # if empty Date object from a station info, it means that it + # should be printed as 9999 999 00 00 00 + raise pyDateException( + 'not enough independent input args to compute full date') def strftime(self): return self.datetime().strftime('%Y-%m-%d %H:%M:%S') @@ -377,9 +406,9 @@ def to_json(self): if self.from_stninfo: return {'stninfo': str(self)} else: - return {'year' : self.year, - 'doy' : self.doy, - 'hour' : self.hour, + return {'year': self.year, + 'doy': self.doy, + 'hour': self.hour, 'minute': self.minute, 'second': self.second} @@ -390,11 +419,15 @@ def __str__(self): if self.year is None: return '9999 999 00 00 00' else: - return '%04i %03i %02i %02i %02i' % (self.year, self.doy, self.hour, self.minute, self.second) + return '%04i %03i %02i %02i %02i' % (self.year, self.doy, + self.hour, self.minute, + self.second) def __check_cmp(self, date): if not isinstance(date, Date): - raise pyDateException('type: %s invalid. Can only compare pyDate.Date objects' % str(type(date))) + raise pyDateException( + 'type: %s invalid. Can only compare pyDate.Date objects' % + str(type(date))) def __lt__(self, date): self.__check_cmp(date) @@ -422,7 +455,9 @@ def __ne__(self, date): def __add__(self, ndays): if not isinstance(ndays, int): - raise pyDateException('type: %s invalid. Can only add integer number of days' % str(type(ndays))) + raise pyDateException( + 'type: %s invalid. Can only add integer number of days' % + str(type(ndays))) return Date(mjd=self.mjd+ndays) @@ -432,7 +467,9 @@ def __sub__(self, ndays): elif isinstance(ndays, Date): return self.mjd - ndays.mjd else: - raise pyDateException('type: %s invalid. Can only subtract integer number of days' % str(type(ndays))) + raise pyDateException( + 'type: %s invalid. 
+            raise pyDateException(
+                'type: %s invalid. Can only subtract integer number of days' %
+                str(type(ndays)))
 
     def __hash__(self):
         # to make the object hashable
@@ -458,7 +495,7 @@ def yyyyddd(self, space=True):
             return '%4i %03i' % (int(self.year), int(self.doy))
         else:
             return '%4i%03i' % (int(self.year), int(self.doy))
-
+
     def iso_date(self):
         return "%d-%02d-%02d" % (self.year, self.month, self.day)
 
@@ -468,19 +505,23 @@ def datetime(self):
                             hour=1, minute=1, second=1)
         else:
             return datetime(year=self.year, month=self.month, day=self.day,
-                            hour=self.hour, minute=self.minute, second=self.second)
+                            hour=self.hour, minute=self.minute,
+                            second=self.second)
 
     def first_epoch(self, out_format='datetime'):
         if out_format == 'datetime':
-            return datetime(year=self.year, month=self.month, day=self.day, hour=0, minute=0, second=0).strftime(
+            return datetime(year=self.year, month=self.month, day=self.day,
+                            hour=0, minute=0, second=0).strftime(
                 '%Y-%m-%d %H:%M:%S')
         else:
-            return date2doy(self.year, self.month, self.day, 0, 0, 0)[1]  # fyear
+            return date2doy(self.year, self.month, self.day, 0, 0, 0)[1]
+            # fyear
 
     def last_epoch(self, out_format='datetime'):
         if out_format == 'datetime':
-            return datetime(year=self.year, month=self.month, day=self.day, hour=23, minute=59, second=59).strftime(
+            return datetime(year=self.year, month=self.month, day=self.day,
+                            hour=23, minute=59, second=59).strftime(
                 '%Y-%m-%d %H:%M:%S')
         else:
-            return date2doy(self.year, self.month, self.day, 23, 59, 59)[1]  # fyear
-
+            return date2doy(self.year, self.month, self.day, 23, 59, 59)[1]
+            # fyear

From 681f5a70a477c5c2a317c78201ac6e2c87f0294d Mon Sep 17 00:00:00 2001
From: Parick Smith
Date: Sun, 13 Oct 2024 22:11:36 -0400
Subject: [PATCH 7/7] pep8 pyetm part 1

---
 pgamit/pyETM.py | 445 ++++++++++++++++++++++++++++++------------------
 1 file changed, 281 insertions(+), 164 deletions(-)

diff --git a/pgamit/pyETM.py b/pgamit/pyETM.py
index b49bf57b..0db531bb 100644
--- a/pgamit/pyETM.py
+++ b/pgamit/pyETM.py
@@ -5,17 +5,14 @@
 Author: Demian D. Gomez
 """
 import datetime
-from os.path import getmtime
-from pprint import pprint
 import traceback
 import warnings
 import sys
 import os
 from time import time
-from io import BytesIO
 import base64
 import logging
-from logging import INFO, ERROR, WARNING, DEBUG, StreamHandler, Formatter
+from logging import StreamHandler, Formatter
 
 # deps
 import numpy as np
@@ -75,7 +72,8 @@
             "position": "Posición de ref.",
             "completion": "Completitud",
             "other": "otros términos polinómicos",
-            "not_enough": "No hay suficientes soluciones para ajustar trayectorias.",
+            "not_enough":
+                "No hay suficientes soluciones para ajustar trayectorias.",
             "table_too_long": "Tabla demasiado larga!",
             "frequency": "Frecuencia",
             "N residuals": "Residuos N",
@@ -143,7 +141,8 @@ def toc(text):
 DEFAULT_RELAXATION = np.array([0.5])
 DEFAULT_POL_TERMS = 2
 DEFAULT_FREQUENCIES = np.array(
-    (1 / 365.25, 1 / (365.25 / 2)))  # (1 yr, 6 months) expressed in 1/days (one year = 365.25)
+    (1 / 365.25, 1 / (365.25 / 2)))
+# (1 yr, 6 months) expressed in 1/days (one year = 365.25)
 
 SIGMA_FLOOR_H = 0.10
 SIGMA_FLOOR_V = 0.15
@@ -161,11 +160,13 @@
 
 class Model(object):
 
     def __init__(self, m_type, **kwargs):
         """
-        Interface to remove pre-determined model from time series. Currently only velocity (VEL) and postseismic
-        deformation (LOG) implemented. For velocity, pass m_type = Model.VEL, date = reference date of velocity, and
-        velocity = ndarray((3,1)). For postseismic, poss m_type = Model.LOG, date = jump datetime, relaxation =
-        ndarray((n,1)), log_amplitude = ndarray((n,3)). To eval the model, call eval with the t vector corresponding to
-        the time series.
+        Interface to remove pre-determined model from time series. Currently
+        only velocity (VEL) and postseismic deformation (LOG) implemented. For
+        velocity, pass m_type = Model.VEL, date = reference date of velocity,
+        and velocity = ndarray((3,1)). For postseismic, pass m_type =
+        Model.LOG, date = jump datetime, relaxation = ndarray((n,1)),
+        log_amplitude = ndarray((n,3)). To eval the model, call eval with the
+        t vector corresponding to the time series.
         """
         self.type = m_type
         self.date = None
@@ -183,28 +184,32 @@ def __init__(self, m_type, **kwargs):
                 elif isinstance(arg, float):
                     self.relaxation = np.array(arg)
                 else:
-                    raise pyETMException_Model('\'relaxation\' must be list, numpy.ndarray, or float')
+                    raise pyETMException_Model(
+                        '\'relaxation\' must be list, numpy.ndarray, or float')
             elif key == 'velocity':
                 if isinstance(arg, list):
                     self.velocity = np.array(arg)
                 elif isinstance(arg, np.ndarray):
                     self.velocity = arg
                 else:
-                    raise pyETMException_Model('\'velocity\' must be list or numpy.ndarray')
+                    raise pyETMException_Model(
+                        '\'velocity\' must be list or numpy.ndarray')
             elif key == 'log_amplitude':
                 if isinstance(arg, list):
                     self.log_amplitude = np.array(arg)
                 elif isinstance(arg, np.ndarray):
                     self.log_amplitude = arg
                 else:
-                    raise pyETMException_Model('\'log_amplitude\' must be list or numpy.ndarray')
+                    raise pyETMException_Model(
+                        '\'log_amplitude\' must be list or numpy.ndarray')
             elif key == 'date':
                 if isinstance(arg, pyDate.Date):
                     self.date = arg
                 elif isinstance(arg, datetime.datetime):
                     self.date = pyDate.Date(datetime=arg)
                 else:
-                    raise pyETMException_Model('\'date\' must be pyDate.Date or datetime')
+                    raise pyETMException_Model(
+                        '\'date\' must be pyDate.Date or datetime')
             elif key == 'fit':
                 if isinstance(arg, bool):
                     self.fit = arg
@@ -214,7 +219,9 @@ def __init__(self, m_type, **kwargs):
         if m_type == self.LOG:
             # validate the dimensions of relaxation and amplitude
             if self.log_amplitude.shape[0] != self.relaxation.shape[0]:
-                raise pyETMException_Model('\'log_amplitude\' dimension 0 must match the elements in relaxation')
+                raise pyETMException_Model(
+                    '\'log_amplitude\' dimension 0 ' +
+                    'must match the elements in relaxation')
 
     def eval(self, t):
         model = np.zeros((3, t.shape[0]))
@@ -224,17 +231,23 @@ def eval(self, t):
 
         for i in range(3):
             if self.type == self.VEL:
-                logger.info('Applying velocity for reference date %s' % self.date.yyyyddd())
+                logger.info('Applying velocity for reference date %s' %
+                            self.date.yyyyddd())
                 model[i] = (t - self.date.fyear) * self.velocity[i]
 
             elif self.type == self.LOG:
-                logger.info('Applying log model for event %s' % self.date.yyyyddd())
-                # log parameters passed, check each relaxation to see if one has to be removed
+                logger.info('Applying log model for event %s' %
+                            self.date.yyyyddd())
+                # log parameters passed, check each relaxation to see if one
+                # has to be removed
                 for j, r in enumerate(self.relaxation):
-                    # for each relaxation, evaluate the model to subtract it from self.l
+                    # for each relaxation, evaluate the model to subtract it
+                    # from self.l
                     hl = np.zeros((t.shape[0],))
-                    pmodel = np.zeros((3, t.shape[0]))
-                    hl[t > self.date.fyear] = np.log10(1. + (t[t > self.date.fyear] - self.date.fyear) / r)
+                    # pmodel = np.zeros((3, t.shape[0]))
+                    hl[t > self.date.fyear] = \
+                        np.log10(1. + (t[t > self.date.fyear] -
+                                       self.date.fyear) / r)
                     # apply the amplitudes
                     amp = self.log_amplitude[j][i]
                     model[i] += amp * hl
 
@@ -255,9 +268,11 @@ def __str__(self):
 class pyETMException_NoDesignMatrix(pyETMException):
     pass
 
+
 class pyETMException_Model(pyETMException):
     pass
 
+
 def distance(lon1, lat1, lon2, lat2):
     """
     Calculate the great circle distance between two points
@@ -272,7 +287,8 @@ def distance(lon1, lat1, lon2, lat2):
     # haversine formula
     dlon = lon2 - lon1
     dlat = lat2 - lat1
-    a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2
+    a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * \
+        np.cos(lat2) * np.sin(dlon / 2) ** 2
     c = 2 * np.arcsin(np.sqrt(a))
     km = 6371 * c
     return km
@@ -282,9 +298,13 @@ def to_postgres(dictionary):
     if isinstance(dictionary, dict):
         for key, val in list(dictionary.items()):
             if isinstance(val, np.ndarray):
-                dictionary[key] = str(val.flatten().tolist()).replace('[', '{').replace(']', '}')
+                dictionary[key] = \
+                    str(val.flatten().tolist()).replace(
+                        '[', '{').replace(']', '}')
     else:
-        dictionary = str(dictionary.flatten().tolist()).replace('[', '{').replace(']', '}')
+        dictionary = \
+            str(dictionary.flatten().tolist()).replace(
+                '[', '{').replace(']', '}')
 
     return dictionary
 
@@ -315,13 +335,17 @@ def __init__(self, cnn, NetworkCode, StationCode):
         self.stack_name = 'ppp'
 
         # get the station from the stations table
-        stn = cnn.query('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
-                        % (NetworkCode, StationCode))
+        stn = cnn.query(
+            ('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' ' +
+             'AND "StationCode" = \'%s\'')
+            % (NetworkCode, StationCode))
 
         stn = stn.dictresult()[0]
 
         if stn['lat'] is None:
-            raise pyETMException('Station %s has no valid metadata in the stations table.' % stn_id)
+            raise pyETMException(
+                'Station %s has no valid metadata in the stations table.'
+                % stn_id)
 
         self.lat = np.array([float(stn['lat'])])
         self.lon = np.array([float(stn['lon'])])
 
@@ -340,29 +364,39 @@ def __init__(self, cnn, NetworkCode, StationCode):
         self.max_dist = 20
 
         # load all the PPP coordinates available for this station
-        # exclude ppp solutions in the exclude table and any solution that is more than 20 meters from the simple
-        # linear trend calculated above
+        # exclude ppp solutions in the exclude table and any solution that is
+        # more than 20 meters from the simple linear trend calculated above
 
-        self.excluded = cnn.query_float('SELECT "Year", "DOY" FROM ppp_soln_excl '
-                                        'WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
-                                        % (NetworkCode, StationCode))
+        self.excluded = cnn.query_float(
+            'SELECT "Year", "DOY" FROM ppp_soln_excl '
+            'WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
+            % (NetworkCode, StationCode))
 
         self.table = cnn.query_float(
             'SELECT "X", "Y", "Z", "Year", "DOY" FROM ppp_soln p1 '
-            'WHERE p1."NetworkCode" = \'%s\' AND p1."StationCode" = \'%s\' ORDER BY "Year", "DOY"'
+            ('WHERE p1."NetworkCode" = \'%s\' AND p1."StationCode" = \'%s\' ' +
+             'ORDER BY "Year", "DOY"')
             % (NetworkCode, StationCode))
 
         self.table = [item for item in self.table
-                      if np.sqrt(np.square(item[0] - x) + np.square(item[1] - y) + np.square(item[2] - z)) <=
+                      if np.sqrt(
+                          np.square(item[0] - x) +
+                          np.square(item[1] - y) +
+                          np.square(item[2] - z)) <=
                       self.max_dist and item[3:] not in self.excluded]
 
         self.blunders = [item for item in self.table
-                         if np.sqrt(np.square(item[0] - x) + np.square(item[1] - y) + np.square(item[2] - z)) >
+                         if np.sqrt(
+                             np.square(item[0] - x) +
+                             np.square(item[1] - y) +
+                             np.square(item[2] - z)) >
                          self.max_dist and item[3:] not in self.excluded]
 
         self.solutions = len(self.table)
 
-        self.ts_blu = np.array([pyDate.Date(year=item[3], doy=item[4]).fyear for item in self.blunders])
+        self.ts_blu = np.array(
+            [pyDate.Date(year=item[3], doy=item[4]).fyear
+             for item in self.blunders])
 
         if self.solutions >= 1:
             a = np.array(self.table)
 
@@ -370,10 +404,13 @@ def __init__(self, cnn, NetworkCode, StationCode):
             self.x = a[:, 0]
             self.y = a[:, 1]
             self.z = a[:, 2]
-            self.t = np.array([pyDate.Date(year=item[0], doy=item[1]).fyear for item in a[:, 3:5]])
-            self.mjd = np.array([pyDate.Date(year=item[0], doy=item[1]).mjd for item in a[:, 3:5]])
+            self.t = np.array([pyDate.Date(year=item[0], doy=item[1]).fyear
+                               for item in a[:, 3:5]])
+            self.mjd = np.array([pyDate.Date(year=item[0], doy=item[1]).mjd
+                                 for item in a[:, 3:5]])
 
-            self.date = [pyDate.Date(year=item[0], doy=item[1]) for item in a[:, 3:5]]
+            self.date = [pyDate.Date(year=item[0], doy=item[1])
+                         for item in a[:, 3:5]]
 
             # continuous time vector for plots
             ts = np.arange(np.min(self.mjd), np.max(self.mjd) + 1, 1)
@@ -381,10 +418,11 @@ def __init__(self, cnn, NetworkCode, StationCode):
             self.ts = np.array([pyDate.Date(mjd=tts).fyear for tts in ts])
 
         elif len(self.blunders) >= 1:
-            raise pyETMException('No viable PPP solutions available for %s (all blunders!)\n'
-                                 '    -> min distance to station coordinate is %.1f meters'
-                                 % (stn_id, np.array([item[5]
-                                                      for item in self.blunders]).min()))
+            raise pyETMException(
+                'No viable PPP solutions available for %s (all blunders!)\n'
+                '    -> min distance to station coordinate is %.1f meters'
+                % (stn_id, np.array([item[5]
+                                     for item in self.blunders]).min()))
         else:
             raise pyETMException('No PPP solutions available for %s' % stn_id)
 
@@ -405,11 +443,13 @@ def __init__(self, cnn, NetworkCode, StationCode):
 
         self.ts_ns = np.array([item for item in self.rnx_no_ppp])
 
-        self.completion = 100. - float(len(self.ts_ns)) / float(len(self.ts_ns) + len(self.t)) * 100.
+        self.completion = 100. - float(len(self.ts_ns)) / \
+            float(len(self.ts_ns) + len(self.t)) * 100.
 
-        ppp_hash = cnn.query_float('SELECT sum(hash) FROM ppp_soln p1 '
-                                   'WHERE p1."NetworkCode" = \'%s\' AND p1."StationCode" = \'%s\''
-                                   % (NetworkCode, StationCode))
+        ppp_hash = cnn.query_float(
+            'SELECT sum(hash) FROM ppp_soln p1 '
+            'WHERE p1."NetworkCode" = \'%s\' AND p1."StationCode" = \'%s\''
+            % (NetworkCode, StationCode))
 
         self.hash = crc32(str(len(self.t) + len(self.blunders)) + ' ' +
                           str(self.auto_x) +
@@ -446,8 +486,10 @@ def __init__(self, cnn, polyhedrons, NetworkCode, StationCode, stack_name):
         self.type = 'gamit'
 
         # get the station from the stations table
-        stn = cnn.query_float('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
-                              % (NetworkCode, StationCode), as_dict=True)[0]
+        stn = cnn.query_float(
+            ('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' ' +
+             'AND "StationCode" = \'%s\'')
+            % (NetworkCode, StationCode), as_dict=True)[0]
 
         if stn['lat'] is not None:
             self.lat = np.array([float(stn['lat'])])
@@ -474,20 +516,29 @@ def __init__(self, cnn, polyhedrons, NetworkCode, StationCode, stack_name):
             if np.sqrt(np.square(np.sum(np.square(a[0, 0:3])))) > 6.3e3:
                 # coordinates given in XYZ
                 nb = np.sqrt(np.square(np.sum(
-                    np.square(a[:, 0:3] - np.array([stn['auto_x'], stn['auto_y'], stn['auto_z']])), axis=1))) \
-                     <= self.max_dist
+                    np.square(
+                        a[:, 0:3] - np.array([stn['auto_x'],
+                                              stn['auto_y'],
+                                              stn['auto_z']])), axis=1))) \
+                    <= self.max_dist
             else:
                 # coordinates are differences
-                nb = np.sqrt(np.square(np.sum(np.square(a[:, 0:3]), axis=1))) <= self.max_dist
+                nb = np.sqrt(np.square(np.sum(np.square(
+                    a[:, 0:3]), axis=1))) <= self.max_dist
 
             if np.any(nb):
                 self.x = a[nb, 0]
                 self.y = a[nb, 1]
                 self.z = a[nb, 2]
-                self.t = np.array([pyDate.Date(year=item[0], doy=item[1]).fyear for item in a[nb, 3:5]])
-                self.mjd = np.array([pyDate.Date(year=item[0], doy=item[1]).mjd for item in a[nb, 3:5]])
+                self.t = np.array([pyDate.Date(year=item[0],
+                                               doy=item[1]).fyear
+                                   for item in a[nb, 3:5]])
+                self.mjd = np.array([pyDate.Date(year=item[0],
+                                                 doy=item[1]).mjd
+                                     for item in a[nb, 3:5]])
 
-                self.date = [pyDate.Date(year=item[0], doy=item[1]) for item in a[nb, 3:5]]
+                self.date = [pyDate.Date(year=item[0], doy=item[1])
+                             for item in a[nb, 3:5]]
 
                 # continuous time vector for plots
                 ts = np.arange(np.min(self.mjd), np.max(self.mjd) + 1, 1)
@@ -513,14 +564,19 @@ def __init__(self, cnn, polyhedrons, NetworkCode, StationCode, stack_name):
                            'r."ObservationYear" = p."Year" AND '
                            'r."ObservationDOY" = p."DOY" AND '
                            'p."name" = \'%s\''
-                           'WHERE r."NetworkCode" = \'%s\' AND r."StationCode" = \'%s\' AND '
-                           'p."NetworkCode" IS NULL' % (stack_name, NetworkCode, StationCode))
+                           ' WHERE r."NetworkCode" = \'%s\' AND '
+                           'r."StationCode" = \'%s\' AND '
+                           'p."NetworkCode" IS NULL' %
+                           (stack_name, NetworkCode, StationCode))
 
-        # new feature: to avoid problems with realignment of the frame. A change in coordinates was not triggering
-        # a recalculation of the ETMs
+        # new feature: to avoid problems with realignment of the frame.
+        # A change in coordinates was not triggering a recalculation
+        # of the ETMs
         crd = cnn.query_float(
-            'SELECT avg("X") + avg("Y") + avg("Z") AS hash FROM stacks WHERE '
-            'name = \'%s\' AND "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
+            'SELECT avg("X") + avg("Y") + avg("Z") '
+            'AS hash FROM stacks WHERE '
+            'name = \'%s\' AND "NetworkCode" = \'%s\' '
+            'AND "StationCode" = \'%s\''
             % (stack_name, NetworkCode, StationCode), as_dict=True)
 
         self.rnx_no_ppp = rnx.dictresult()
@@ -2264,12 +2320,15 @@ def plot(self, pngfile=None, t_win=None, residuals=False, plot_missing=True,
 
         else:
 
-            f, axis = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(16, 10))  # type: plt.subplots
+            f, axis = plt.subplots(nrows=3, ncols=1, sharex=True,
+                                   figsize=(16, 10))  # type: plt.subplots
 
             f.suptitle(LABEL('station') + ' %s (%s %.2f%%) lat: %.5f lon: %.5f'
-                       % (stationID(self), self.soln.type.upper(), self.soln.completion,
+                       % (stationID(self), self.soln.type.upper(),
+                          self.soln.completion,
                           self.soln.lat, self.soln.lon) +
-                       '\n' + LABEL('not_enough'), fontsize=9, family='monospace')
+                       '\n' + LABEL('not_enough'), fontsize=9,
+                       family='monospace')
 
             for i, ax in enumerate((axis[0], axis[1], axis[2])):
                 ax.plot(self.soln.t, lneu[i], 'ob', markersize=2)
@@ -2312,7 +2371,8 @@ def onpick(self, event):
         self.f.canvas.mpl_disconnect(self.cid)
         self.picking = False
         print('Epoch: %s' % pyDate.Date(fyear=event.xdata).yyyyddd())
-        jtype = int(eval(input(' -- Enter type of jump (0 = mechanic; 1 = geophysical): ')))
+        jtype = int(eval(input(
+            ' -- Enter type of jump (0 = mechanic; 1 = geophysical): ')))
         if jtype == 1:
             relx = eval(input(' -- Enter relaxation (e.g. 0.5, 0.5,0.01): '))
             operation = str(input(' -- Enter operation (+, -): '))
@@ -2331,7 +2391,8 @@ def enable_picking(self, event):
         if not self.picking:
             print('Entering picking mode')
             self.picking = True
-            self.cid = self.f.canvas.mpl_connect('button_press_event', self.onpick)
+            self.cid = self.f.canvas.mpl_connect(
+                'button_press_event', self.onpick)
         else:
             print('Disabling picking mode')
             self.picking = False
@@ -2340,8 +2401,6 @@ def plot_hist(self, pngfile=None, fileio=None):
 
     def plot_hist(self, pngfile=None, fileio=None):
         import matplotlib.pyplot as plt
-        import matplotlib.mlab as mlab
-        from scipy.stats import norm
         from matplotlib.patches import Ellipse
 
         labels = (LABEL('north') + ' [mm]',
@@ -2352,17 +2411,22 @@ def plot_hist(self, pngfile=None, fileio=None):
 
         filt = self.F[0] * self.F[1] * self.F[2]
 
-        f, axis = plt.subplots(nrows=2, ncols=2, figsize=(16, 10))  # type: plt.subplots
-
-        f.suptitle(LABEL('station') + ' %s (%s %.2f%%) lat: %.5f lon: %.5f\n'
-                   'VAR (N E U)      : %s\n'
-                   'COV (N-E N-U E-U): %s'
-                   % (stationID(self),
-                      self.soln.type.upper(), self.soln.completion,
-                      self.soln.lat, self.soln.lon,
-                      ' '.join('%10.3e' % i for i in np.diag(self.covar)),
-                      ' '.join('%10.3e' % i for i in [self.covar[0, 1], self.covar[0, 2], self.covar[1, 2]])),
-                   fontsize=9, family='monospace')
+        f, axis = plt.subplots(nrows=2, ncols=2, figsize=(16, 10))
+        # type: plt.subplots
+
+        f.suptitle(
+            LABEL('station') + ' %s (%s %.2f%%) lat: %.5f lon: %.5f\n'
+            'VAR (N E U)      : %s\n'
+            'COV (N-E N-U E-U): %s'
+            % (stationID(self),
+               self.soln.type.upper(), self.soln.completion,
+               self.soln.lat, self.soln.lon,
+               ' '.join('%10.3e' % i for i in np.diag(self.covar)),
+               ' '.join('%10.3e' %
+                        i for i in [self.covar[0, 1],
+                                    self.covar[0, 2],
+                                    self.covar[1, 2]])),
+            fontsize=9, family='monospace')
 
         n = np.sqrt(np.sum(self.R ** 2, axis=0))
         N = self.R[0][n <= 0.05] * 1000
        E = self.R[1][n <= 0.05] * 1000
 
@@ -2373,7 +2437,8 @@ def plot_hist(self, pngfile=None, fileio=None):
         ax = axis[0][0]
         ax.plot(E, N, 'ob', markersize=2)
         # ax.plot(E[filt], N[filt], 'ob', markersize=2)
-        # ax.plot(E[np.logical_not(filt)], N[np.logical_not(filt)], 'oc', markersize=2)
+        # ax.plot(E[np.logical_not(filt)], N[np.logical_not(filt)],
+        #         'oc', markersize=2)
 
         # process the covariance matrix
         c = self.covar[0:2, 0:2]
@@ -2396,7 +2461,8 @@ def plot_hist(self, pngfile=None, fileio=None):
         ax.grid(True)
         ax.set_ylabel(labels[0])
         ax.set_xlabel(labels[1])
-        ax.set_title("%s %s-%s" % (LABEL('residual plot'), LABEL('north'), LABEL('east')))
+        ax.set_title("%s %s-%s" % (LABEL('residual plot'), LABEL('north'),
+                                   LABEL('east')))
         ax.axis('equal')
         f.canvas.draw()
         ax.legend()
@@ -2406,7 +2472,8 @@ def plot_hist(self, pngfile=None, fileio=None):
         # N histogram
         ax = axis[0][1]
         # (mu, sigma) = norm.fit(N)
-        n, bins, patches = ax.hist(N, 200, alpha=0.75, facecolor='blue', orientation='horizontal')
+        n, bins, patches = ax.hist(N, 200, alpha=0.75, facecolor='blue',
+                                   orientation='horizontal')
         # y = mlab.normpdf(bins, mu, sigma)
         # ax.plot(y, bins, 'r--', linewidth=2)
         ax.grid(True)
@@ -2438,17 +2505,21 @@ def plot_hist(self, pngfile=None, fileio=None):
         ax.set_xlabel(LABEL('U residuals') + ' [mm]')
         ax.set_title(LABEL('histogram plot') + ' ' + LABEL('up'))
 
-        # residuals = np.sqrt(np.square(L[0]) + np.square(L[1]) + np.square(L[2])) - \
-        #             np.sqrt(np.square(np.dot(self.A, self.C[0])) + np.square(np.dot(self.A, self.C[1])) +
-        #                     np.square(np.dot(self.A, self.C[2])))
+        # residuals = np.sqrt(np.square(L[0]) + np.square(L[1]) + \
+        #     np.square(L[2])) - \
+        #     np.sqrt(np.square(np.dot(self.A, self.C[0])) + \
+        #     np.square(np.dot(self.A, self.C[1])) + \
+        #     np.square(np.dot(self.A, self.C[2])))
 
         # (mu, sigma) = norm.fit(residuals)
-        # n, bins, patches = plt.hist(residuals, 200, normed=1, alpha=0.75, facecolor='blue')
+        # n, bins, patches = plt.hist(residuals, 200, normed=1,
+        #                             alpha=0.75, facecolor='blue')
 
         # y = mlab.normpdf(bins, mu, sigma)
         # plt.plot(bins, y, 'r--', linewidth=2)
-        # plt.title(r'$\mathrm{Histogram\ of\ residuals (mm):}\ \mu=%.3f,\ \sigma=%.3f$' % (mu*1000, sigma*1000))
+        # plt.title(r'$\mathrm{Histogram\ of\ residuals (mm):}\ '
+        #           r'\mu=%.3f,\ \sigma=%.3f$' % (mu*1000, sigma*1000))
         # plt.grid(True)
 
         if pngfile is not None:
@@ -2466,9 +2537,11 @@ def plot_hist(self, pngfile=None, fileio=None):
 
     @staticmethod
     def autoscale_y(ax, margin=0.1):
-        """This function rescales the y-axis based on the data that is visible given the current xlim of the axis.
+        """This function rescales the y-axis based on the data that is visible
+        given the current xlim of the axis.
        ax -- a matplotlib axes object
-        margin -- the fraction of the total height of the y-data to pad the upper and lower ylims"""
+        margin -- the fraction of the total height of the y-data to pad the
+        upper and lower ylims"""
 
         def get_bottom_top(line):
             xd = line.get_xdata()
@@ -2492,7 +2565,7 @@ def get_bottom_top(line):
 
         if bot == top:
             ax.autoscale(enable=True, axis='y', tight=False)
-            #ax.autoscale(enable=False, axis='y', tight=False)
+            # ax.autoscale(enable=False, axis='y', tight=False)
         else:
             ax.set_ylim(bot, top)
 
@@ -2501,9 +2574,9 @@ def set_lims(self, t_win, plt, ax):
         if t_win is None:
             # turn on to adjust the limits, then turn off to plot jumps
             ax.autoscale(enable=True, axis='x', tight=False)
-            #ax.autoscale(enable=False, axis='x', tight=False)
+            # ax.autoscale(enable=False, axis='x', tight=False)
             ax.autoscale(enable=True, axis='y', tight=False)
-            #ax.autoscale(enable=False, axis='y', tight=False)
+            # ax.autoscale(enable=False, axis='y', tight=False)
         else:
             if t_win[0] == t_win[1]:
                 t_win[0] = t_win[0] - 1. / 365.25
@@ -2520,7 +2593,8 @@ def plot_missing_soln(self, ax):
 
         # plot the position of the outliers
         for blunder in self.soln.ts_blu:
-            ax.quiver((blunder, blunder), ax.get_ylim(), (0, 0), (-0.01, 0.01), scale_units='height',
+            ax.quiver((blunder, blunder), ax.get_ylim(), (0, 0),
+                      (-0.01, 0.01), scale_units='height',
                       units='height', pivot='tip', width=0.008, edgecolors='r')
 
     def plot_jumps(self, ax):
@@ -2575,12 +2649,14 @@ def todictionary(self, time_series=False, model=False):
                          'e': self.factor[1],
                          'u': self.factor[2]}
 
-        etm['xyz_covariance'] = self.rotate_sig_cov(covar=self.covar).tolist()
+        etm['xyz_covariance'] = self.rotate_sig_cov(
+            covar=self.covar).tolist()
         etm['neu_covariance'] = self.covar.tolist()
 
         if time_series:
             etm['time_series'] = {
-                't': np.array([self.soln.t.tolist(), self.soln.mjd.tolist()]).transpose().tolist(),
+                't': np.array([self.soln.t.tolist(),
+                               self.soln.mjd.tolist()]).transpose().tolist(),
                 'mjd': self.soln.mjd.tolist(),
                 'x': self.soln.x.tolist(),
                 'y': self.soln.y.tolist(),
@@ -2592,20 +2668,23 @@ def todictionary(self, time_series=False, model=False):
                 'residuals': self.R.tolist(),
                 'weights': self.P.transpose().tolist(),
 
-                'model_neu': [] if self.A is None or not model else \
-                             [(np.dot(self.As, self.C[i]).tolist()) for i in range(3)],
+                'model_neu': [] if self.A is None or not model
+                else [(np.dot(self.As, self.C[i]).tolist()) for i in range(3)],
 
-                'filter': [] if self.A is None else \
-                          np.logical_and(np.logical_and(self.F[0], self.F[1]), self.F[2]).tolist()
+                'filter': [] if self.A is None
+                else np.logical_and(np.logical_and(self.F[0], self.F[1]),
+                                    self.F[2]).tolist()
             }
 
         return etm
 
-    def get_xyz_s(self, year, doy, jmp=None, sigma_h=SIGMA_FLOOR_H, sigma_v=SIGMA_FLOOR_V, force_model=False):
-        # this function find the requested epochs and returns an X Y Z and sigmas
+    def get_xyz_s(self, year, doy, jmp=None, sigma_h=SIGMA_FLOOR_H,
+                  sigma_v=SIGMA_FLOOR_V, force_model=False):
+        # this function finds the requested epochs and returns X Y Z and sigmas
         # jmp = 'pre' returns the coordinate immediately before a jump
         # jmp = 'post' returns the coordinate immediately after a jump
-        # jmp = None returns either the coordinate before or after, depending on the time of the jump.
+        # jmp = None returns either the coordinate before or after,
+        # depending on the time of the jump.
 
         # find this epoch in the t vector
         date = pyDate.Date(year=year, doy=doy)
         window = None
 
         for jump in self.Jumps.table:
             if jump.date == date and \
-                    jump.p.jump_type in (GENERIC_JUMP, CO_SEISMIC_JUMP_DECAY, ANTENNA_CHANGE, CO_SEISMIC_JUMP) and \
+                    jump.p.jump_type in (GENERIC_JUMP, CO_SEISMIC_JUMP_DECAY,
+                                         ANTENNA_CHANGE, CO_SEISMIC_JUMP) and \
                     jump.fit and \
                     np.sqrt(np.sum(np.square(jump.p.params[:, 0]))) > 0.02:
 
                 window = jump.date
-                # if no pre or post specified, then determine using the time of the jump
+                # if no pre or post specified, then determine using the time
+                # of the jump
                 if jmp is None:
-                    if (jump.date.datetime().hour + jump.date.datetime().minute / 60.0) < 12:
+                    if (jump.date.datetime().hour +
+                            jump.date.datetime().minute / 60.0) < 12:
                         jmp = 'post'
                     else:
                         jmp = 'pre'
@@ -2646,7 +2728,8 @@ def get_xyz_s(self, year, doy, jmp=None, sigma_h=SIGMA_FLOOR_H, sigma_v=SIGMA_FL
                 # the coordinate is good
                 xyz = L[:, index]
                 sig = self.R[:, index]
-                source = self.soln.stack_name.upper() + ' with ETM solution: good'
+                source = self.soln.stack_name.upper() + \
+                    ' with ETM solution: good'
 
             else:
                 # the coordinate is marked as bad
 
                 for i in range(3):
                     neu[i] = np.dot(self.As[idt, :], self.C[i])
 
                 xyz = self.rotate_2xyz(neu) + ref_pos
-                # Use the deviation from the ETM multiplied by 2.5 to estimate the error
+                # Use the deviation from the ETM multiplied by 2.5 to estimate
+                # the error
                 sig = 2.5 * self.R[:, index]
-                source = self.soln.stack_name.upper() + ' with ETM solution: filtered'
+                source = self.soln.stack_name.upper() + \
+                    ' with ETM solution: filtered'
 
         elif not index.size and self.A is not None:
 
@@ -2677,7 +2762,8 @@ def get_xyz_s(self, year, doy, jmp=None, sigma_h=SIGMA_FLOOR_H, sigma_v=SIGMA_FL
 
         elif index.size and self.A is None:
 
-            # no ETM (too few points), but we have a solution for the requested day
+            # no ETM (too few points), but we have a solution for the
+            # requested day
            xyz = L[:, index]
            # set the uncertainties in NEU by hand
            sig = np.array([[9.99], [9.99], [9.99]])
 
        else:
            # no ETM (too few points) and no solution for this day, get average
-            source = 'No ' + self.soln.stack_name.upper() + ' solution, no ETM: mean coordinate'
+            source = 'No ' + self.soln.stack_name.upper() + \
+                ' solution, no ETM: mean coordinate'
            xyz = np.mean(L, axis=1)[:, np.newaxis]
            # set the uncertainties in NEU by hand
            sig = np.array([[9.99], [9.99], [9.99]])
 
@@ -2701,22 +2788,27 @@ def get_xyz_s(self, year, doy, jmp=None, sigma_h=SIGMA_FLOOR_H, sigma_v=SIGMA_FL
            source += '. fast moving station, bumping up sigmas'
 
        # apply floor sigmas
-        sig = np.sqrt(np.square(sig) + np.square(np.array([[sigma_h], [sigma_h], [sigma_v]])))
+        sig = np.sqrt(np.square(sig) + np.square(np.array(
+            [[sigma_h], [sigma_h], [sigma_v]])))
 
        return xyz, sig, window, source
 
    def rotate_2neu(self, ecef):
-        return np.array(ct2lg(ecef[0], ecef[1], ecef[2], self.soln.lat, self.soln.lon))
+        return np.array(ct2lg(ecef[0], ecef[1], ecef[2], self.soln.lat,
+                              self.soln.lon))
 
    def rotate_2xyz(self, neu):
-        return np.array(lg2ct(neu[0], neu[1], neu[2], self.soln.lat, self.soln.lon))
+        return np.array(lg2ct(neu[0], neu[1], neu[2], self.soln.lat,
+                              self.soln.lon))
 
    def rotate_sig_cov(self, sigmas=None, covar=None):
 
        if sigmas is None and covar is None:
-            raise pyETMException('Error in rotate_sig_cov: must provide either sigmas or covariance matrix')
+            raise pyETMException(
+                'Error in rotate_sig_cov: must provide either sigmas '
+                'or covariance matrix')
 
        R = rotlg2ct(self.soln.lat, self.soln.lon)
 
@@ -2748,8 +2840,8 @@ def rotate_sig_cov(self, sigmas=None, covar=None):
    def nearestPD(self, A):
        """Find the nearest positive-definite matrix to input
 
-        A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1], which
-        credits [2].
+        A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1],
+        which credits [2].
 
       [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
 
@@ -2770,14 +2862,15 @@ def nearestPD(self, A):
            return A3
 
        spacing = np.spacing(np.linalg.norm(A))
-        # The above is different from [1]. It appears that MATLAB's `chol` Cholesky
-        # decomposition will accept matrixes with exactly 0-eigenvalue, whereas
-        # Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab
-        # for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
-        # will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
-        # the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
-        # `spacing` will, for Gaussian random matrixes of small dimension, be on
-        # othe order of 1e-16. In practice, both ways converge, as the unit test
+        # The above is different from [1]. It appears that MATLAB's `chol`
+        # Cholesky decomposition will accept matrixes with exactly
+        # 0-eigenvalue, whereas Numpy's will not. So where [1] uses
+        # `eps(mineig)` (where `eps` is Matlab for `np.spacing`), we use the
+        # above definition. CAVEAT: our `spacing` will be much larger than
+        # [1]'s `eps(mineig)`, since `mineig` is usually on the order of
+        # 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas `spacing`
+        # will, for Gaussian random matrixes of small dimension, be on the
+        # order of 1e-16. In practice, both ways converge, as the unit test
        # below suggests.
        I = np.eye(A.shape[0])
        k = 1
@@ -2913,7 +3006,8 @@ def adjust_lsq(self, Ai, Li):
            else:
                break  # cst_pass = True
 
-        # make sure there are no values below eps. Otherwise matrix becomes singular
+        # make sure there are no values below eps.
+        # Otherwise matrix becomes singular
        P[P < np.finfo(float).eps] = 1e-6
 
        # some statistics
@@ -2953,16 +3047,19 @@ def chi2inv(chi, df):
        return np.min(sum)
 
    @staticmethod
-    def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
+    def warn_with_traceback(message, category, filename, lineno, file=None,
+                            line=None):
 
        log = file if hasattr(file, 'write') else sys.stderr
 
        traceback.print_stack(file=log)
-        log.write(warnings.formatwarning(message, category, filename, lineno, line))
+        log.write(warnings.formatwarning(
+            message, category, filename, lineno, line))
 
    def get_outliers_list(self):
        """
        Function to obtain the outliers based on the ETMs sigma
-        :return: a list containing the network code, station code and dates of the outliers in the time series
+        :return: a list containing the network code, station code and dates of
+        the outliers in the time series
        """
 
        filt = self.F[0] * self.F[1] * self.F[2]
@@ -2972,38 +3069,47 @@ def get_outliers_list(self):
 
 class PPPETM(ETM):
 
-    def __init__(self, cnn, NetworkCode, StationCode, plotit=False, no_model=False, models=(), ignore_db_params=False,
+    def __init__(self, cnn, NetworkCode, StationCode, plotit=False,
+                 no_model=False, models=(), ignore_db_params=False,
                 plot_remove_jumps=False, plot_polynomial_removed=False):
 
        # load all the PPP coordinates available for this station
-        # exclude ppp solutions in the exclude table and any solution that is more than 100 meters from the auto coord
+        # exclude ppp solutions in the exclude table and any solution that
+        # is more than 100 meters from the auto coord
 
        self.ppp_soln = PppSoln(cnn, NetworkCode, StationCode)
 
-        ETM.__init__(self, cnn, self.ppp_soln, no_model, plotit=plotit, models=models,
-                     ignore_db_params=ignore_db_params, plot_remove_jumps=plot_remove_jumps,
+        ETM.__init__(self, cnn, self.ppp_soln, no_model, plotit=plotit,
+                     models=models,
+                     ignore_db_params=ignore_db_params,
+                     plot_remove_jumps=plot_remove_jumps,
                     plot_polynomial_removed=plot_polynomial_removed)
 
 
 class GamitETM(ETM):
 
-    def __init__(self, cnn, NetworkCode, StationCode, plotit=False, no_model=False, gamit_soln=None,
-                 stack_name=None, models=(), ignore_db_params=False, plot_remove_jumps=False,
+    def __init__(self, cnn, NetworkCode, StationCode, plotit=False,
+                 no_model=False, gamit_soln=None,
+                 stack_name=None, models=(), ignore_db_params=False,
+                 plot_remove_jumps=False,
                 plot_polynomial_removed=False):
 
        if gamit_soln is None:
-            self.polyhedrons = cnn.query_float('SELECT "X", "Y", "Z", "Year", "DOY" FROM stacks '
-                                               'WHERE "name" = \'%s\' AND "NetworkCode" = \'%s\' AND '
-                                               '"StationCode" = \'%s\' '
-                                               'ORDER BY "Year", "DOY", "NetworkCode", "StationCode"'
-                                               % (stack_name, NetworkCode, StationCode))
+            self.polyhedrons = cnn.query_float(
+                'SELECT "X", "Y", "Z", "Year", "DOY" FROM stacks '
+                'WHERE "name" = \'%s\' AND "NetworkCode" = \'%s\' AND '
+                '"StationCode" = \'%s\' '
+                'ORDER BY "Year", "DOY", "NetworkCode", "StationCode"'
+                % (stack_name, NetworkCode, StationCode))
 
-            self.gamit_soln = GamitSoln(cnn, self.polyhedrons, NetworkCode, StationCode, stack_name)
+            self.gamit_soln = GamitSoln(cnn, self.polyhedrons, NetworkCode,
+                                        StationCode, stack_name)
 
        else:
            # load the GAMIT polyhedrons
            self.gamit_soln = gamit_soln
 
-        ETM.__init__(self, cnn, self.gamit_soln, no_model, plotit=plotit, ignore_db_params=ignore_db_params,
+        ETM.__init__(self, cnn, self.gamit_soln, no_model, plotit=plotit,
+                     ignore_db_params=ignore_db_params,
                     models=models, plot_remove_jumps=plot_remove_jumps,
                     plot_polynomial_removed=plot_polynomial_removed)
 
@@ -3013,7 +3119,8 @@ def get_etm_soln_list(self, use_ppp_model=False, cnn=None):
        stn_id = stationID(self)
 
        if self.A is None:
-            raise pyETMException_NoDesignMatrix('No design matrix available for %s' % stn_id)
+            raise pyETMException_NoDesignMatrix(
+                'No design matrix available for %s' % stn_id)
 
        elif not use_ppp_model:
            # get residuals from GAMIT solutions to GAMIT model
@@ -3023,11 +3130,14 @@ def get_etm_soln_list(self, use_ppp_model=False, cnn=None):
            # get residuals from GAMIT solutions to PPP model
            etm = PPPETM(cnn, self.NetworkCode, self.StationCode)
            if etm.A is None:
-                raise pyETMException_NoDesignMatrix('No PPP design matrix available for %s' % stn_id)
+                raise pyETMException_NoDesignMatrix(
+                    'No PPP design matrix available for %s' % stn_id)
            else:
-                # DDG: 20-SEP-2018 compare using MJD not FYEAR to avoid round off errors
+                # DDG: 20-SEP-2018 compare using MJD not FYEAR to avoid round
+                # off errors
                index = np.isin(etm.soln.mjds, self.soln.mjd)
-                # use the etm object to obtain the design matrix that matches the dimensions of self.soln.t
+                # use the etm object to obtain the design matrix that matches
+                # the dimensions of self.soln.t
                neu = [np.dot(etm.As[index, :], etm.C[i])
                       for i in range(3)]
 
@@ -3051,13 +3161,15 @@ def __init__(self, cnn, NetworkCode, StationCode, plotit=False,
                 no_model=False, gamit_soln=None, project=None):
 
        if gamit_soln is None:
-            # self.polyhedrons = cnn.query_float('SELECT "X", "Y", "Z", "Year", "DOY" FROM gamit_soln '
-            #                                    'WHERE "Project" = \'%s\' AND "NetworkCode" = \'%s\' AND '
-            #                                    '"StationCode" = \'%s\' '
-            #                                    'ORDER BY "Year", "DOY", "NetworkCode", "StationCode"'
-            #                                    % (project, NetworkCode, StationCode))
-
-            # self.gamit_soln = GamitSoln(cnn, self.polyhedrons, NetworkCode, StationCode, project)
+            # self.polyhedrons = cnn.query_float(
+            #     'SELECT "X", "Y", "Z", "Year", "DOY" FROM gamit_soln '
+            #     'WHERE "Project" = \'%s\' AND "NetworkCode" = \'%s\' AND '
+            #     '"StationCode" = \'%s\' '
+            #     'ORDER BY "Year", "DOY", "NetworkCode", "StationCode"'
+            #     % (project, NetworkCode, StationCode))
+
+            # self.gamit_soln = GamitSoln(
+            #     cnn, self.polyhedrons, NetworkCode, StationCode, project)
 
            raise ValueError('DailyRep class requires a gamit_soln object')
        else:
@@ -3066,20 +3178,23 @@ def __init__(self, cnn, NetworkCode, StationCode, plotit=False,
 
        # the the solution type to dra
        self.soln.type = 'dra'
 
-        # replace auto_[xyz] with zeros so that in ETM.__init__ the self.l vector is realized properly
-        # DRA requires coordinates differences, not coordinates relative to reference
+        # replace auto_[xyz] with zeros so that in ETM.__init__ the
+        # self.l vector is realized properly
+        # DRA requires coordinates differences, not coordinates relative
+        # to reference
        self.soln.auto_x = 0
        self.soln.auto_y = 0
        self.soln.auto_z = 0
 
-        ETM.__init__(self, cnn, self.soln, no_model, FitEarthquakes=False, FitGenericJumps=False,
-                     FitPeriodic=False, plotit=plotit)
+        ETM.__init__(self, cnn, self.soln, no_model, FitEarthquakes=False,
+                     FitGenericJumps=False, FitPeriodic=False, plotit=plotit)
 
    def get_residuals_dict(self):
        # this function return the values of the ETM ONLY
 
        if self.A is None:
-            raise pyETMException_NoDesignMatrix('No design matrix available for %s' % stationID(self))
+            raise pyETMException_NoDesignMatrix(
+                'No design matrix available for %s' % stationID(self))
 
        neu = [np.dot(self.A, self.C[i])
               for i in range(3)]
 
@@ -3093,7 +3208,8 @@ def get_residuals_dict(self):
        py = np.ones(self.P[1].shape)
        pz = np.ones(self.P[2].shape)
 
-        return [(self.NetworkCode, self.StationCode, x, y, z, sigx, sigy, sigz, date.year, date.doy)
+        return [(self.NetworkCode, self.StationCode, x, y, z, sigx, sigy, sigz,
+                 date.year, date.doy)
                for x, y, z, sigx, sigy, sigz, date in zip(rxyz[0],
                                                           rxyz[1],
@@ -3106,11 +3222,12 @@ def get_residuals_dict(self):
 
 class FileETM(ETM):
 
-    def __init__(self, cnn, poly_list=None, plotit=False, no_model=False, plot_remove_jumps=False,
-                 plot_polynomial_removed=False):
+    def __init__(self, cnn, poly_list=None, plotit=False, no_model=False,
+                 plot_remove_jumps=False, plot_polynomial_removed=False):
 
        # self.soln.type = 'file'
 
-        ETM.__init__(self, cnn, poly_list, no_model, plotit=plotit, ignore_db_params=True,
-                     plot_remove_jumps=plot_remove_jumps, plot_polynomial_removed=plot_polynomial_removed)
-
+        ETM.__init__(self, cnn, poly_list, no_model, plotit=plotit,
+                     ignore_db_params=True,
+                     plot_remove_jumps=plot_remove_jumps,
+                     plot_polynomial_removed=plot_polynomial_removed)
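
Reviewer note (not part of the patches above): patch 7/7 reflows the Model
docstring, which is the only written description of that interface. The
following is a minimal usage sketch, not the library's documented example;
it assumes pgamit exposes pyDate and pyETM as importable modules, that
Model.VEL/Model.LOG are the class constants referenced in the code, and
that eval() returns the (3, len(t)) array it fills in. All numeric values
are invented for illustration.

    import datetime

    import numpy as np

    from pgamit import pyDate          # assumed module path
    from pgamit.pyETM import Model     # assumed module path

    # daily epochs over five years, expressed as fractional years
    t = 2015.0 + np.arange(5 * 365) / 365.25

    # secular velocity referenced to 2015-001 (placeholder NEU values, m/yr)
    vel = Model(Model.VEL,
                date=pyDate.Date(year=2015, doy=1),
                velocity=[0.010, 0.002, -0.001])

    # one post-seismic log decay: 0.5 yr relaxation, 1x3 amplitudes (m)
    log = Model(Model.LOG,
                date=datetime.datetime(2016, 4, 16),
                relaxation=[0.5],
                log_amplitude=[[0.020, -0.015, 0.005]])

    # per the docstring, each eval(t) yields a (3, len(t)) correction
    # to subtract from the time series before adjustment
    correction = vel.eval(t) + log.eval(t)

    # pyDate.Date arithmetic from patch 1/7: integer days via MJD
    d = pyDate.Date(year=2024, doy=100)
    assert (d + 30) - d == 30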