Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add wide_n_deep_serving_client #31

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
112 changes: 112 additions & 0 deletions wide_n_deep_serving_client/wide_n_deep_serving_client.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
/*
 * wide_n_deep_serving_client.cpp
 *
 *  Created on: 2017-10-28
 *      Author: lambdaji
 */

#include "wide_n_deep_serving_client.h"
#include "google/protobuf/map.h"
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/command_line_flags.h"
#include <vector>
#include <string>

typedef google::protobuf::Map<tensorflow::string, tensorflow::TensorProto> OutMap;

using grpc::Channel;
using grpc::ClientContext;
using grpc::Status;

using tensorflow::serving::PredictRequest;
using tensorflow::serving::PredictResponse;

/**
 * Sends a single Predict RPC to a TensorFlow Serving instance for the
 * Wide & Deep census-income model.
 *
 * A hard-coded census sample is packed into a tensorflow::Example,
 * serialized, and sent as the "inputs" tensor (DT_STRING, shape [1]).
 * Each output tensor in the response is summarized as a string and
 * stored in `result` keyed by output name.
 *
 * @param model_name            name of the model deployed on the server
 * @param model_signature_name  signature to invoke (e.g. "serving_default")
 * @param result                out-param: output tensor name -> summarized value
 * @return 0 on success, -1 on serialization failure or gRPC error
 */
int ServingClient::callPredict(const std::string& model_name, const std::string& model_signature_name, std::map<std::string, std::string> & result)
{
    PredictRequest predictRequest;
    PredictResponse response;
    ClientContext context;
    tensorflow::TensorProto req_tp;
    tensorflow::Example example;

    predictRequest.mutable_model_spec()->set_name(model_name);
    predictRequest.mutable_model_spec()->set_signature_name(model_signature_name);

    google::protobuf::Map<tensorflow::string, tensorflow::TensorProto>& inputs = *predictRequest.mutable_inputs();
    google::protobuf::Map<tensorflow::string, tensorflow::Feature>& feature_dict = *example.mutable_features()->mutable_feature();

    // Build one hard-coded census sample (feature -> Example).
    // Numeric columns go in float_list, categorical columns in bytes_list,
    // mirroring the feature columns of the Wide & Deep tutorial model.
    feature_dict["age"].mutable_float_list()->add_value(25);
    feature_dict["capital_gain"].mutable_float_list()->add_value(0);
    feature_dict["capital_loss"].mutable_float_list()->add_value(0);
    feature_dict["education"].mutable_bytes_list()->add_value("11th");
    feature_dict["education_num"].mutable_float_list()->add_value(7);
    feature_dict["gender"].mutable_bytes_list()->add_value("Male");
    feature_dict["hours_per_week"].mutable_float_list()->add_value(40);
    feature_dict["native_country"].mutable_bytes_list()->add_value("United-States");
    feature_dict["occupation"].mutable_bytes_list()->add_value("Machine-op-inspct");
    feature_dict["relationship"].mutable_bytes_list()->add_value("Own-child");
    feature_dict["workclass"].mutable_bytes_list()->add_value("Private");

    // Serialize the Example into the request's "inputs" tensor.
    // Note: was an unqualified `string` (nothing brings std::string into
    // scope here), and the SerializeToString() result was ignored.
    std::string serialized;
    if (!example.SerializeToString(&serialized))
    {
        return -1; // serialization failed; don't send an empty tensor
    }

    // Shape [1]: a single serialized Example. For batch predicting, set the
    // dim size to N and call add_string_val() once per sample.
    req_tp.mutable_tensor_shape()->add_dim()->set_size(1);
    req_tp.set_dtype(tensorflow::DataType::DT_STRING);
    req_tp.add_string_val(serialized);
    inputs["inputs"] = req_tp;

    // Blocking unary RPC.
    Status status = _stub->Predict(&context, predictRequest, &response);
    if (!status.ok())
    {
        // gRPC failure (see status.error_code()/error_message() for details).
        return -1;
    }

    // Convert each output TensorProto to a Tensor and store a short,
    // human-readable summary (first 10 values) keyed by output name.
    // Tensors that fail conversion are skipped.
    for (auto& output : *response.mutable_outputs())
    {
        tensorflow::Tensor tensor;
        if (tensor.FromProto(output.second))
        {
            result[output.first] = tensor.SummarizeValue(10);
        }
    }

    return 0;
}

41 changes: 41 additions & 0 deletions wide_n_deep_serving_client/wide_n_deep_serving_client.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
/*
 * wide_n_deep_serving_client.h
 *
 *  Created on: 2017-10-28
 *      Author: lambdaji
 */

#ifndef WIDE_N_DEEP_SERVING_CLIENT_H_
#define WIDE_N_DEEP_SERVING_CLIENT_H_

#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <map>
#include <vector>

#include "grpc++/create_channel.h"
#include "grpc++/security/credentials.h"
#include "tensorflow_serving/apis/prediction_service.grpc.pb.h"

using tensorflow::serving::PredictionService;

/// Thin gRPC client for a TensorFlow Serving PredictionService endpoint.
/// Owns the generated stub; one instance per channel/server address.
class ServingClient
{
public:
    /// Factory: connects an insecure channel to `sServerPort`
    /// (e.g. "localhost:9000") and returns a ready-to-use client.
    /// Note: parameter is now taken by const reference to avoid a copy.
    static std::shared_ptr<ServingClient> createClient(const std::string& sServerPort){
        std::shared_ptr<ServingClient> p = std::make_shared<ServingClient>(grpc::CreateChannel(sServerPort, grpc::InsecureChannelCredentials()));
        return p;
    }
public:
    /// Builds the prediction stub over an existing channel.
    /// `explicit` prevents accidental implicit conversion from a channel.
    explicit ServingClient(const std::shared_ptr<grpc::Channel>& channel) : _stub(PredictionService::NewStub(channel)) { }

    /// Runs one Predict RPC against `model_name`/`model_signature_name`;
    /// fills `result` with output-tensor summaries. Returns 0 on success,
    /// -1 on failure.
    int callPredict(const std::string& model_name, const std::string& model_signature_name, std::map<std::string, std::string> & result);

private:
    std::unique_ptr<PredictionService::Stub> _stub; // generated gRPC stub, sole owner
};



#endif /* WIDE_N_DEEP_SERVING_CLIENT_H_ */
12 changes: 12 additions & 0 deletions wide_n_deep_serving_client/wide_n_deep_serving_client.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
Serving a TensorFlow Wide & Deep Model (tf.estimator) by TF-Serving in C++

This tutorial builds on the code developed in TensorFlow Wide & Deep Learning Tutorial, so if you haven't yet completed that tutorial, you need to take a look at it first.

This tutorial is based on a Python version from https://github.com/MtDersvan/tf_playground/tree/master/wide_and_deep_tutorial.

You should first take a look at the following proto files:
- tensorflow/core/example/example.proto
- tensorflow/core/example/feature.proto
- tensorflow/core/framework/tensor.proto
- tensorflow_serving/apis/predict.proto
- tensorflow_serving/apis/model.proto