-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtensorboard_modded.py
More file actions
50 lines (41 loc) · 1.76 KB
/
tensorboard_modded.py
File metadata and controls
50 lines (41 loc) · 1.76 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
import os
import sys

# Silence TensorFlow's C++-level logging before the import ('3' = errors only).
# Note: this muting reportedly does not work reliably on Linux.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# Temporarily swap stdin/stderr for devnull so TensorFlow's import-time
# chatter is suppressed, then restore the real streams afterwards.
stdin = sys.stdin
_devnull_in = open(os.devnull, 'r')  # stdin must be readable ('w' would break any read)
sys.stdin = _devnull_in
stderr = sys.stderr
_devnull_err = open(os.devnull, 'w')
sys.stderr = _devnull_err

import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)  # TF1-style Python-level log filtering
from tensorflow.keras.callbacks import Callback

# Restore the real streams and close the throwaway devnull handles
# (the original leaked both file objects).
sys.stdin = stdin
sys.stderr = stderr
_devnull_in.close()
_devnull_err.close()
class ModifiedTensorBoard(Callback):
    """TensorBoard callback that keeps a single writer across multiple .fit() calls.

    Keras' stock TensorBoard callback starts a fresh log file and resets the
    step counter on every .fit() call; this version appends everything to one
    event file under an externally managed step counter, which also
    dramatically reduces the amount of log data written and the write time.
    """

    def __init__(self, log_dir):
        """Create one FileWriter for ``log_dir`` shared by all .fit() calls.

        Args:
            log_dir: Directory the single TensorBoard event file is written to.
        """
        super().__init__()  # original skipped this; Callback.__init__ sets base state
        self.step = 1  # global step counter, advanced by external training code
        self.log_dir = log_dir
        # TF1-style writer; a single instance so every .fit() appends to one file.
        self.writer = tf.summary.FileWriter(self.log_dir)

    def on_epoch_end(self, epoch, logs=None):
        """Save epoch metrics under our own step number.

        Uses ``self.step`` instead of ``epoch`` so successive .fit() calls do
        not restart the x-axis from 0.
        """
        # Guard against logs=None (Keras may pass it) — `**None` would raise.
        self.update_stats(self.step, **(logs or {}))

    def update_stats(self, step, **stats):
        """Write arbitrary named scalar metrics at ``step`` (callable externally)."""
        self._write_logs(stats, step)

    def _write_logs(self, logs, index):
        """Physically write each scalar in ``logs`` at global step ``index``.

        More or less the same writer as in Keras' TensorBoard callback.
        """
        for name, value in logs.items():
            # 'batch' and 'size' are Keras bookkeeping entries, not metrics.
            if name in ['batch', 'size']:
                continue
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value
            summary_value.tag = name
            self.writer.add_summary(summary, index)
        self.writer.flush()