Hi XLA-devs.
I am looking at the *.after_optimization.txt file produced by XLA's HLO dumping. I found several instructions whose metadata doesn't have an op_type field.
Is that expected? Or is that a bug?
Thanks,
Garvit
Here's an example. The metadata for the reduce, convert, and select instructions below has an op_name but no op_type.
%fused_computation.116 (param_0.426: f16[65536,512], param_1.549: f16[65536,512]) -> (f32[512], f16[65536,512]) {
%param_1.549 = f16[65536,512]{1,0} parameter(1)
%constant_7_clone_1 = f16[] constant(0), metadata={op_type="StridedSliceGrad" op_name="Adam/gradients_1/gradients/dlrm/StatefulPartitionedCall_grad/PartitionedCall/gradients/StatefulPartitionedCall_grad/PartitionedCall/gradients/strided_slice_grad/StridedSliceGrad"}
%broadcast.59.clone.1 = f16[65536,512]{1,0} broadcast(f16[] %constant_7_clone_1), dimensions={}, metadata={op_name="Adam/gradients_1/gradients/dlrm/StatefulPartitionedCall_grad/PartitionedCall/gradients/StatefulPartitionedCall_grad/PartitionedCall/gradients/bottom_model_1/dense/Relu_grad/ReluGrad"}
%compare.0.clone.1 = pred[65536,512]{1,0} compare(f16[65536,512]{1,0} %param_1.549, f16[65536,512]{1,0} %broadcast.59.clone.1), direction=GT, metadata={op_name="Adam/gradients_1/gradients/dlrm/StatefulPartitionedCall_grad/PartitionedCall/gradients/StatefulPartitionedCall_grad/PartitionedCall/gradients/bottom_model_1/dense/Relu_grad/ReluGrad"}
%param_0.426 = f16[65536,512]{1,0} parameter(0)
%select.0.clone.1 = f16[65536,512]{1,0} select(pred[65536,512]{1,0} %compare.0.clone.1, f16[65536,512]{1,0} %param_0.426, f16[65536,512]{1,0} %broadcast.59.clone.1), metadata={op_name="Adam/gradients_1/gradients/dlrm/StatefulPartitionedCall_grad/PartitionedCall/gradients/StatefulPartitionedCall_grad/PartitionedCall/gradients/bottom_model_1/dense/Relu_grad/ReluGrad"}
%convert.61 = f32[65536,512]{1,0} convert(f16[65536,512]{1,0} %select.0.clone.1), metadata={op_name="Adam/gradients_1/gradients/dlrm/StatefulPartitionedCall_grad/PartitionedCall/gradients/StatefulPartitionedCall_grad/PartitionedCall/gradients/bottom_model_1/dense/BiasAdd_grad/BiasAddGrad"}
%constant_4 = f32[] constant(0), metadata={op_type="BiasAddGrad" op_name="Adam/gradients_1/gradients/dlrm/StatefulPartitionedCall_grad/PartitionedCall/gradients/StatefulPartitionedCall_grad/PartitionedCall/gradients/top_model/dense_7/BiasAdd_grad/BiasAddGrad"}
%reduce.126 = f32[512]{0} reduce(f32[65536,512]{1,0} %convert.61, f32[] %constant_4), dimensions={0}, to_apply=%region_0.530, metadata={op_name="Adam/gradients_1/gradients/dlrm/StatefulPartitionedCall_grad/PartitionedCall/gradients/StatefulPartitionedCall_grad/PartitionedCall/gradients/bottom_model_1/dense/BiasAdd_grad/BiasAddGrad"}
ROOT %tuple.1 = (f32[512]{0}, f16[65536,512]{1,0}) tuple(f32[512]{0} %reduce.126, f16[65536,512]{1,0} %select.0.clone.1)
}