Before (IR Dump After TensorFlowShapeInferencePass):
%417 = "mhlo.slice"(%226) {limit_indices = dense<1> : tensor<1xi64>, start_indices = dense<0> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>} : (tensor<2xi32>) -> tensor<1xi32>
%418 = "mhlo.reshape"(%417) : (tensor<1xi32>) -> tensor<i32>
%419 = "tf.Tile"(%96, %417) {device = ""} : (tensor<1xi32>, tensor<1xi32>) -> tensor<?xi32>
%420 = shape.shape_of %419 : tensor<?xi32> -> tensor<?xindex>
%421 = tensor.cast %420 : tensor<?xindex> to tensor<1xindex>
%422 = "mhlo.dynamic_broadcast_in_dim"(%7, %421) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<i32>, tensor<1xindex>) -> tensor<?xi32>
%423 = mhlo.multiply %419, %422 : tensor<?xi32>
After (IR Dump After TensorFlowShapeInferencePass Failed):
%417 = "mhlo.slice"(%226) {limit_indices = dense<1> : tensor<1xi64>, start_indices = dense<0> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>} : (tensor<2xi32>) -> tensor<1xi32>
%418 = "mhlo.reshape"(%417) : (tensor<1xi32>) -> tensor<i32>
%419 = "tf.Tile"(%96, %417) {device = ""} : (tensor<1xi32>, tensor<1xi32>) -> tensor<?xi32>
%420 = "shape.shape_of"(%419) : (tensor<?xi32>) -> tensor<?xindex>
%421 = "tensor.cast"(%420) : (tensor<?xindex>) -> tensor<?xindex>
%422 = "tf.Cast"(%421) {Truncate = false} : (tensor<?xindex>) -> tensor<1xindex>
%423 = "mhlo.dynamic_broadcast_in_dim"(%7, %422) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<i32>, tensor<1xindex>) -> tensor<?xi32>
%424 = "mhlo.multiply"(%419, %423) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
Comparing the two dumps: after the failed pass, the tensor.cast now only yields tensor<?xindex>, and an extra "tf.Cast" from tensor<?xindex> to tensor<1xindex> has been inserted in front of the mhlo.dynamic_broadcast_in_dim. Unfortunately, I cannot reproduce this behavior in a small func, even when using the same ops, so I had to cut these snippets out of the print log of a big model.
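For reference, the same op sequence isolated into a standalone func would look roughly like the sketch below (the func name, argument names, and signature are placeholders I made up; the attributes and types are copied from the "Before" dump above). As noted, running a small func like this through the pass does not reproduce the failure for me.

func.func @tile_broadcast_repro(%arg0: tensor<2xi32>, %arg1: tensor<1xi32>, %arg2: tensor<i32>) -> tensor<?xi32> {
  // Slice the first element of the shape-like tensor, as in %417/%418 above.
  %0 = "mhlo.slice"(%arg0) {limit_indices = dense<1> : tensor<1xi64>, start_indices = dense<0> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>} : (tensor<2xi32>) -> tensor<1xi32>
  %1 = "mhlo.reshape"(%0) : (tensor<1xi32>) -> tensor<i32>
  // Tile to a dynamically shaped result, then recover its shape as a tensor<1xindex>.
  %2 = "tf.Tile"(%arg1, %0) {device = ""} : (tensor<1xi32>, tensor<1xi32>) -> tensor<?xi32>
  %3 = shape.shape_of %2 : tensor<?xi32> -> tensor<?xindex>
  %4 = tensor.cast %3 : tensor<?xindex> to tensor<1xindex>
  // Broadcast a scalar to that shape and multiply, as in %422/%423 above.
  %5 = "mhlo.dynamic_broadcast_in_dim"(%arg2, %4) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<i32>, tensor<1xindex>) -> tensor<?xi32>
  %6 = mhlo.multiply %2, %5 : tensor<?xi32>
  return %6 : tensor<?xi32>
}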