module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 723 : i32}} {
  func @main(%arg0: tensor<32x28x28xf32>) -> tensor<32x10xf32> attributes {llvm.emit_c_interface, tf.entry_function = {control_outputs = "", inputs = "x", outputs = "Identity"}} {
    %0 = "tf.Const"() {value = dense<[-1, 784]> : tensor<2xi32>} : () -> tensor<2xi32>
    %1 = "tf.Placeholder"() {device = "", shape = #tf.shape<>} : () -> tensor<!tf.resource>
    %2 = "tf.ReadVariableOp"(%1) {device = ""} : (tensor<!tf.resource>) -> tensor<128xf32>
    %3 = "tf.ReadVariableOp"(%1) {device = ""} : (tensor<!tf.resource>) -> tensor<784x128xf32>
    %4 = "tf.ReadVariableOp"(%1) {device = ""} : (tensor<!tf.resource>) -> tensor<10xf32>
    %5 = "tf.ReadVariableOp"(%1) {device = ""} : (tensor<!tf.resource>) -> tensor<128x10xf32>
    %6 = "tf.Reshape"(%arg0, %0) {device = ""} : (tensor<32x28x28xf32>, tensor<2xi32>) -> tensor<32x784xf32>
    %7 = "tf.MatMul"(%6, %3) {device = "", transpose_a = false, transpose_b = false} : (tensor<32x784xf32>, tensor<784x128xf32>) -> tensor<32x128xf32>
    %8 = "tf.BiasAdd"(%7, %2) {data_format = "NHWC", device = ""} : (tensor<32x128xf32>, tensor<128xf32>) -> tensor<32x128xf32>
    %9 = "tf.Relu"(%8) {device = ""} : (tensor<32x128xf32>) -> tensor<32x128xf32>
    %10 = "tf.MatMul"(%9, %5) {device = "", transpose_a = false, transpose_b = false} : (tensor<32x128xf32>, tensor<128x10xf32>) -> tensor<32x10xf32>
    %11 = "tf.BiasAdd"(%10, %4) {data_format = "NHWC", device = ""} : (tensor<32x10xf32>, tensor<10xf32>) -> tensor<32x10xf32>
    %12 = "tf.Identity"(%11) {device = ""} : (tensor<32x10xf32>) -> tensor<32x10xf32>
    return %12 : tensor<32x10xf32>
  }
}
The above MLIR is emitted with the new tf.function API. Do we need to add support for tf.Placeholder ops in the -tf-promote-resource-to-args pass? In other words, does tf.Placeholder replace the tf.VarHandleOp ops here?
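For comparison, here is a minimal sketch of the shape that -tf-promote-resource-to-args is typically run on, where each resource comes from its own tf.VarHandleOp with a shared_name rather than from a tf.Placeholder. The variable name and shapes below are made up for illustration, not taken from the model above:

  func @main(%arg0: tensor<32x784xf32>) -> tensor<32x128xf32> {
    // Hypothetical variable handle; the resource is produced by tf.VarHandleOp
    // with a shared_name, and its value is materialized by tf.ReadVariableOp.
    %0 = "tf.VarHandleOp"() {container = "", shared_name = "dense/kernel"} : () -> tensor<!tf.resource<tensor<784x128xf32>>>
    %1 = "tf.ReadVariableOp"(%0) : (tensor<!tf.resource<tensor<784x128xf32>>>) -> tensor<784x128xf32>
    %2 = "tf.MatMul"(%arg0, %1) {transpose_a = false, transpose_b = false} : (tensor<32x784xf32>, tensor<784x128xf32>) -> tensor<32x128xf32>
    return %2 : tensor<32x128xf32>
  }

In the dump above, by contrast, a single tf.Placeholder of type tensor<!tf.resource> is read four times with four different result types, which is why the question of pass support comes up.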