For anyone looking here in the future: there is no need for an input side packet — a regular input_stream works. Example:
# Graph-level input stream carrying the encoder selection value.
input_stream: "selected_encoder"
# SwitchContainer forwards its tagged inputs to exactly one of the
# contained_node subgraphs below, chosen at runtime by the value on the
# SELECT stream (0-based index into the contained_node list).
node
{
calculator: "SwitchContainer"
name: "Encoder"
# SELECT: integer packet choosing which contained_node is active.
input_stream: "SELECT:selected_encoder"
input_stream: "INPUT_TENSOR:input_tensor"
output_stream: "OUTPUT_TENSOR:encoder_tensor"
node_options:
{
[
type.googleapis.com/mediapipe.SwitchContainerOptions]
{
# Index 0: chosen when SELECT == 0.
contained_node:
{
calculator: "OnnxModelInference"
name: "LadonNamedEncoder"
options: {
[mediapipe.OnnxSessionFromSavedModelOptions.ext]
{
saved_model_path: "/media/data/ml_models/ladon_named_tensors/encoder_model.onnx"
}
}
}
# Index 1: chosen when SELECT == 1.
contained_node:
{
calculator: "OnnxModelInference"
name: "LadonIndexEncoder"
options: {
[mediapipe.OnnxSessionFromSavedModelOptions.ext]
{
saved_model_path: "/media/data/ml_models/ladon_named_tensors/different_encoder.onnx"
input_tensor_index: [0]
}
}
}
}
}
}
In your code, after you have initialized and started the graph object:
auto encoder = mediapipe::MakePacket<int32_t>(1);
auto timestamp_us =
(double)cv::getTickCount() / (double)cv::getTickFrequency() * 1e6;
auto status = graph->AddPacketToInputStream("selected_encoder",
encoder.At(mediapipe::Timestamp(timestamp_us)));