A (IO) -> B (foo) -> C (TF) -> D (fuu) -> E (IO)
struct node{
node(){ //whatever }
message operator()(const & message<){
super_algo(message); // modify the message
return std::move(message)
}
}struct helper_tf {
// Load a TensorFlow SavedModel (tag "serve") and open a session on it.
// @param model  path to the SavedModel directory
// @throws std::runtime_error if TF reports a non-OK status while loading
explicit helper_tf(const std::string& model)
    : graph_(TF_NewGraph(), TF_DeleteGraph), status_(TF_NewStatus(), TF_DeleteStatus),
      session_opts_(TF_NewSessionOptions(), TF_DeleteSessionOptions), run_opts_(nullptr) {
    const char *tags = "serve";
    int ntags = 1;
    // The MetaGraphDef output buffer is nullptr: the saved model's metadata
    // is discarded here (only the graph and the session are kept).
    session_ = TF_LoadSessionFromSavedModel(session_opts_.get(),
        run_opts_, model.c_str(), &tags, ntags, graph_.get(), nullptr, status_.get());
    if (TF_GetCode(status_.get()) != TF_OK)
        throw std::runtime_error(TF_Message(status_.get()));
} // fixed: removed stray ';' after the constructor body
// Close and free the TF session. Note: if the constructor throws, this
// destructor never runs, so session_ is only deleted after a successful load.
~helper_tf() { TF_DeleteSession(session_, status_.get()); }
TF_Session *session_; // owned; released in the destructor via TF_DeleteSession
// RAII wrappers pairing each TF C-API object with its matching deleter.
std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> graph_;
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status_;
std::unique_ptr<TF_SessionOptions, decltype(&TF_DeleteSessionOptions)> session_opts_;
TF_Buffer *run_opts_; // non-owning; always nullptr here (set in the constructor)
}; uint8_t inter_op_parallelism_threads = 2;
uint8_t intra_op_parallelism_threads = 2;
// Hand-serialized ConfigProto varints: tag 0x10 appears to be field 2
// (intra_op_parallelism_threads) and tag 0x28 field 5
// (inter_op_parallelism_threads) — NOTE(review): confirm the field numbers
// against tensorflow/core/protobuf/config.proto before relying on this.
uint8_t config[] = {0x10, intra_op_parallelism_threads, 0x28, inter_op_parallelism_threads};
TF_SetConfig(session_opts_.get(), (void *)config, 4, status_.get());
--
You received this message because you are subscribed to the Google Groups "TensorFlow Developers" group.
To unsubscribe from this group and stop receiving emails from it, send an email to developers+...@tensorflow.org.
To view this discussion on the web visit https://groups.google.com/a/tensorflow.org/d/msgid/developers/c6cf11ef-57ac-464e-a593-6f8547f1fcc4%40tensorflow.org.
def foo(image, model):
    """Run the saved model's default serving signature on `image`.

    The computation is pinned to GPU 3 via tf.device.
    NOTE(review): the email annotation arrow that was fused into the `with`
    line (breaking the syntax) has been moved into a comment.
    """
    with tf.device("/device:gpu:3"):  # <-- this is the line!
        infer = model.signatures["serving_default"]
        return infer(tf.constant(image))["input"]
To unsubscribe from this group and stop receiving emails from it, send an email to devel...@tensorflow.org.
const char *tags = "serve";
int ntags = 1;
// get the session from a saved model (this call also fills meta_buffer_ with the MetaGraphDef)
session_ = TF_LoadSessionFromSavedModel(session_opts_.get(), run_opts_, model.c_str(), &tags, ntags,
graph_.get(), meta_buffer_.get(), status_.get());
check_status(status_.get());
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> buffer(TF_NewBuffer(), TF_DeleteBuffer);
// serialize the loaded graph to a GraphDef so it can be re-imported once per device
TF_GraphToGraphDef(graph_.get(), buffer.get(), status_.get());
check_status(status_.get());
int ngpu = 0;
// get the number of GPUs via the CUDA driver API
cuDeviceGetCount(&ngpu);
for (int g = 0; g < ngpu; ++g) {
// create session options for this device
// NOTE(review): session_opts is a raw owning pointer; ownership is handed to qsession_ below
TF_SessionOptions *session_opts = TF_NewSessionOptions();
// hand-serialized ConfigProto bytes — presumably gpu_options (memory fraction,
// allow_growth, visible_device_list); TODO confirm against config.proto
std::vector<uint8_t> config = {0x32, 0x1c, 0x9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe0, 0x3f,
0x20, 0x1, 0x2a, 0xf, 0x30, 0x2c, 0x31, 0x2c, 0x32, 0x2c, 0x33,
0x2c, 0x34, 0x2c, 0x35, 0x2c, 0x36, 0x2c, 0x37, 0x38, 0x1};
TF_SetConfig(session_opts, (void *)config.data(), config.size(), status_.get());
check_status(status_.get());
// new options for the graph import
std::unique_ptr<TF_ImportGraphDefOptions, decltype(&TF_DeleteImportGraphDefOptions)> graph_options(
TF_NewImportGraphDefOptions(), TF_DeleteImportGraphDefOptions);
// prepare the device string for this GPU
std::string device = "/device:GPU:" + std::to_string(g);
// pin every imported node to that device
TF_ImportGraphDefOptionsSetDefaultDevice(graph_options.get(), device.c_str());
// prepare the new, modified graph
TF_Graph *ngraph = TF_NewGraph();
// re-import the serialized GraphDef with the per-device options
TF_GraphImportGraphDef(ngraph, buffer.get(), graph_options.get(), status_.get());
check_status(status_.get());
// create a new session on the re-imported graph
// NOTE(review): this session lacks the saved model's variable data — see the
// "Container localhost does not exist" failure described below
TF_Session *device_session = TF_NewSession(ngraph, session_opts, status_.get());
check_status(status_.get());
// push the new session, options, and graph into a concurrent queue; a worker
// thread will pick up a session to execute TF_SessionRun
qsession_.push(std::make_tuple(device_session, session_opts, ngraph));
}
However, this new session will crash during the execution of TF_SessionRun. Indeed, the graph has been modified (all GPUs are correctly allocated, at least), but the data from the saved model are "missing" in the new session.
I get the following error:
Container: localhost. This could mean that the variable was uninitialized. Not found: Container localhost does not exist. (Could not find resource:
This "feature" is independent of what I am trying to do. Currently, with the C API, I think it is not possible to start a new working Session from the Graph obtained via TF_LoadSessionFromSavedModel: the graph is fine, but the metadata will always be missing.
// Load the session from the saved model; passing nullptr for the MetaGraphDef
// output buffer means the saved model's metadata is discarded.
auto session = TF_LoadSessionFromSavedModel(session_opts_.get(), run_opts_, model.c_str(), &tags, ntags,
graph_.get(), nullptr, status_.get());
auto new_session = TF_NewSession(graph_.get(), session_opts_.get(), status_.get()); // Using this session will fail because the metadata are missing.
If you look at the source of TF_LoadSessionFromSavedModel, the TF_Session is built using all the content of the saved-model directory. So, game over or not — reading all the functions in c_api.h, I do not find anything that connects the "data" of the saved model to the new graph I have generated.
Thank you very much for the support
++t
what(): TensorFlow device (GPU:0) is being mapped to multiple CUDA devices (1 now, and 0 previously), which is not supported. This may be the result of providing different GPU configurations (ConfigProto.gpu_options, for example different visible_device_list) when creating multiple Sessions in the same process. This is not currently supported, see https://github.com/tensorflow/tensorflow/issues/19083