ray_hook = RayTuneReportingHook(
    params={'mean_loss': 'sparse_softmax_cross_entropy_loss/value',
            'mean_validation_accuracy': 'classification_accuracy/Mean'},
    reporter=reporter)

my_class.estimator(
    lambda: cross_validator.get_train_iterator(split, lambda x: my_class.parse_example(x)),
    lambda: cross_validator.get_eval_iterator(split, lambda x: my_class.parse_example(x)),
    params,
    max_steps=100000,
    eval_hooks=[ray_hook])

# Notes:
# Set the ReportingHook params to a dict mapping the TrainableResult keys to either
# tensors or tensor names; the hook should be able to resolve both. This reports a
# value to Ray every time eval is run. I have not yet figured out how to aggregate
# values such as averages across batches within a single evaluation run.
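
# For reference, a minimal sketch of what such a RayTuneReportingHook could look like.
# This assumes the TF 1.x SessionRunHook API and the old callable-reporter Ray Tune
# interface; the class name and constructor arguments mirror the snippet above rather
# than any published API. Matching the note above, it reports only the last batch's
# values per eval run, with no cross-batch aggregation.
import tensorflow as tf

class RayTuneReportingHook(tf.train.SessionRunHook):
    """Reports the values of selected tensors to Ray Tune at the end of each eval run."""

    def __init__(self, params, reporter):
        # params: dict mapping Ray Tune result keys to tensors or tensor names.
        self.params = params
        self.reporter = reporter

    def begin(self):
        # Resolve any tensor names against the current (eval) graph.
        graph = tf.get_default_graph()
        self._tensors = {
            key: (t if isinstance(t, tf.Tensor)
                  else graph.get_tensor_by_name(t if ':' in t else t + ':0'))
            for key, t in self.params.items()
        }

    def before_run(self, run_context):
        # Ask the session to also fetch the tensors we want to report.
        return tf.train.SessionRunArgs(self._tensors)

    def after_run(self, run_context, run_values):
        # Keep the most recent batch's values; they are reported when eval ends.
        self._latest = {k: float(v) for k, v in run_values.results.items()}

    def end(self, session):
        # Report once per evaluation run.
        self.reporter(**self._latest)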