@@ -314,6 +314,7 @@ def setup_parameters(
         sigma=1.,
         sigmaA=1.,
         step_width=0.03,
+        summaries_path=None,
         trajectory_file=None,
         use_reweighting=False,
         verbose=0):
@@ -361,6 +362,7 @@ def setup_parameters(
         sigma=sigma,
         sigmaA=sigmaA,
         step_width=step_width,
+        summaries_path=summaries_path,
         trajectory_file=trajectory_file,
         use_reweighting=use_reweighting,
         verbose=verbose)
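With both hunks applied, callers opt in to TensorBoard output simply by passing the new flag; everything else is unchanged, because every new code path below is guarded by `if self.FLAGS.summaries_path is not None`. A minimal sketch of such a call, where the option values and the log directory are illustrative rather than taken from this repository:

    # hypothetical invocation; omitted options keep their defaults
    setup_parameters(
        step_width=0.03,
        summaries_path="/tmp/tati_summaries",  # new: directory for TensorBoard event files
        verbose=1)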
@@ -790,7 +792,10 @@ def sample(self, return_run_info = False, return_trajectories = False, return_av
                           for walker_index in range(self.FLAGS.number_walkers)]
 
         list_of_nodes = ["sample_step", "accuracy", "global_step", "loss"]
-        test_nodes = [[self.summary]*self.FLAGS.number_walkers]
+        if self.FLAGS.summaries_path is not None:
+            test_nodes = [self.summary]*self.FLAGS.number_walkers
+        else:
+            test_nodes = []
         for item in list_of_nodes:
             test_nodes.append([self.nn[walker_index].get(item) \
                 for walker_index in range(self.FLAGS.number_walkers)])
@@ -889,6 +894,10 @@ def sample(self, return_run_info = False, return_trajectories = False, return_av
                 assert (check_accepted[walker_index] == 0)
                 assert (check_rejected[walker_index] == 0)
 
+        # prepare summaries for TensorBoard
+        if self.FLAGS.summaries_path is not None:
+            summary_writer = tf.summary.FileWriter(self.FLAGS.summaries_path, self.sess.graph)
+
         logging.info("Starting to sample")
         logging.info_intervals = max(1, int(self.FLAGS.max_steps/100))
         last_time = time.process_time()
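For context, `tf.summary.FileWriter` is the standard TF1 mechanism for writing the event files that TensorBoard displays; passing `self.sess.graph` additionally records the graph. A self-contained sketch of the machinery this hunk relies on, with an invented scalar, placeholder, and directory:

    import tensorflow as tf

    loss = tf.placeholder(tf.float32, shape=(), name="loss")
    tf.summary.scalar("loss", loss)   # register a scalar summary op
    merged = tf.summary.merge_all()   # presumably what self.summary holds

    with tf.Session() as sess:
        writer = tf.summary.FileWriter("/tmp/summaries", sess.graph)
        for step in range(3):
            proto = sess.run(merged, feed_dict={loss: 1.0 / (step + 1)})
            writer.add_summary(proto, step)  # serialized Summary proto plus step index
        writer.close()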
@@ -980,8 +989,12 @@ def sample(self, return_run_info = False, return_trajectories = False, return_av
             # or
             # tf.run([loss_eval, train_step], ...)
             # is not important. Only a subsequent, distinct tf.run() call would produce a different loss_eval.
-            summary, _, acc, global_step, loss_eval = \
-                self.sess.run(test_nodes, feed_dict=feed_dict)
+            if self.FLAGS.summaries_path is not None:
+                summary, _, acc, global_step, loss_eval = \
+                    self.sess.run(test_nodes, feed_dict=feed_dict)
+            else:
+                _, acc, global_step, loss_eval = \
+                    self.sess.run(test_nodes, feed_dict=feed_dict)
 
             if self.FLAGS.sampler in ["StochasticGradientLangevinDynamics",
                                       "GeometricLangevinAlgorithm_1stOrder",
@@ -1012,6 +1025,12 @@ def sample(self, return_run_info = False, return_trajectories = False, return_av
                                  self.static_vars["virials"],
                                  self.static_vars["noise"]])
 
+            if self.FLAGS.summaries_path is not None:
+                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
+                run_metadata = tf.RunMetadata()
+                summary_writer.add_run_metadata(run_metadata, 'step%d' % current_step)
+                summary_writer.add_summary(summary, current_step)
+
             for walker_index in range(self.FLAGS.number_walkers):
                 if current_step >= self.FLAGS.burn_in_steps:
                     accumulated_loss_nominator[walker_index] += loss_eval[walker_index] * exp(-self.FLAGS.inverse_temperature * loss_eval[walker_index])
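As this hunk stands, `run_options` is never used and `run_metadata` is still empty when it is handed to `add_run_metadata`: in TF1, tracing data only appears in `run_metadata` if both objects are passed to the `sess.run` call being traced. A sketch of the wiring that would actually capture a trace, reusing the names from this hunk:

    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    # pass both to the traced run call so FULL_TRACE data is recorded
    summary, _, acc, global_step, loss_eval = \
        self.sess.run(test_nodes, feed_dict=feed_dict,
                      options=run_options, run_metadata=run_metadata)
    summary_writer.add_run_metadata(run_metadata, 'step%d' % current_step)
    summary_writer.add_summary(summary, current_step)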
@@ -1168,6 +1187,10 @@ def sample(self, return_run_info = False, return_trajectories = False, return_av
 
         logging.info("SAMPLED.")
 
+        # close summaries file
+        if self.FLAGS.summaries_path is not None:
+            summary_writer.close()
+
         return run_info, trajectory, averages
 
     def _create_default_feed_dict_with_constants(self, walker_index=0):
@@ -1229,9 +1252,13 @@ def train(self, walker_index=0, return_run_info = False, return_trajectories = F
         assert( walker_index < self.FLAGS.number_walkers )
 
         placeholder_nodes = self.nn[walker_index].get_dict_of_nodes(["learning_rate", "y_"])
-        test_nodes = [self.summary]+self.nn[walker_index].get_list_of_nodes(
+        if self.FLAGS.summaries_path is not None:
+            test_nodes = [self.summary]
+        else:
+            test_nodes = []
+        test_nodes.extend(self.nn[walker_index].get_list_of_nodes(
             ["train_step", "accuracy", "global_step", "loss", "y_", "y"]) \
-            + [self.static_vars["gradients"]]
+            + [self.static_vars["gradients"]])
 
         output_width = 8
         output_precision = 8
@@ -1269,8 +1296,13 @@ def train(self, walker_index=0, return_run_info = False, return_trajectories = F
             np.zeros((steps, no_params)),
             columns=header)
 
+
         default_feed_dict = self._create_default_feed_dict_with_constants(walker_index)
 
+        # prepare summaries for TensorBoard
+        if self.FLAGS.summaries_path is not None:
+            summary_writer = tf.summary.FileWriter(self.FLAGS.summaries_path, self.sess.graph)
+
         logging.info("Starting to train")
         last_time = time.process_time()
         elapsed_time = 0
@@ -1306,14 +1338,24 @@ def train(self, walker_index=0, return_run_info = False, return_trajectories = F
             weights_eval = self.weights[walker_index].evaluate(self.sess)
             biases_eval = self.biases[walker_index].evaluate(self.sess)
 
-            summary, _, acc, global_step, loss_eval, y_true_eval, y_eval, scaled_grad = \
-                self.sess.run(test_nodes, feed_dict=feed_dict)
+            if self.FLAGS.summaries_path is not None:
+                summary, _, acc, global_step, loss_eval, y_true_eval, y_eval, scaled_grad = \
+                    self.sess.run(test_nodes, feed_dict=feed_dict)
+            else:
+                _, acc, global_step, loss_eval, y_true_eval, y_eval, scaled_grad = \
+                    self.sess.run(test_nodes, feed_dict=feed_dict)
 
             gradients, virials = self.sess.run([self.static_vars["gradients"][walker_index],
                                                 self.static_vars["virials"][walker_index]])
             if current_step >= self.FLAGS.burn_in_steps:
                 accumulated_virials += virials
 
+            if self.FLAGS.summaries_path is not None:
+                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
+                run_metadata = tf.RunMetadata()
+                summary_writer.add_run_metadata(run_metadata, 'step%d' % current_step)
+                summary_writer.add_summary(summary, current_step)
+
             if current_step % self.FLAGS.every_nth == 0:
                 current_time = time.process_time()
                 time_elapsed_per_nth_step = current_time - last_time
@@ -1377,6 +1419,10 @@ def train(self, walker_index=0, return_run_info = False, return_trajectories = F
             #logging.debug('y at step %s: %s' % (i, str(y_eval[0:9].transpose())))
         logging.info("TRAINED down to loss %s and accuracy %s." % (loss_eval, acc))
 
+        # close summaries file
+        if self.FLAGS.summaries_path is not None:
+            summary_writer.close()
+
         return run_info, trajectory, averages
 
     def compute_optimal_stepwidth(self, walker_index=0):
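Once `sample()` or `train()` has been run with `summaries_path` set, the resulting event files can be inspected with `tensorboard --logdir <summaries_path>` and browsed in a web browser; the graph view works because the `FileWriter` above was constructed with `self.sess.graph`.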