
Commit e9e7dbf

code clean up
wenbingl committed Oct 18, 2024
1 parent: 0930a2b · commit: e9e7dbf
Showing 2 changed files with 1 addition and 73 deletions.
57 changes: 0 additions & 57 deletions include/custom_op/tensor_api.h
@@ -557,63 +557,6 @@ class Tensor<std::string_view> : public TensorBase {
   std::unique_ptr<IStringTensorStorage<std::string_view>> storage_;
 };
 
-
-// template<>
-// class Tensor<std::vector<TensorBase*>> : public TensorBase {
-//  public:
-//   using TensorVector = std::vector<TensorBase*>;
-//   Tensor(const TensorVector& ss) : storage_(std::make_unique<EagerStringTensorStorage<TensorVector>>(ss)) {}
-
-//   Tensor() : storage_(std::make_unique<EagerStringTensorStorage<TensorVector>>()) {}
-
-//   ONNXTensorElementDataType Type() const override {
-//     return GetOrtDType<std::vector<TensorBase*>>();
-//   }
-
-//   const TensorVector& Data() const {
-//     return storage_->Data();
-//   }
-
-//   const std::vector<int64_t>& Shape() const override {
-//     return storage_->Shape();
-//   }
-
-//   int64_t NumberOfElement() const override {
-//     auto& shape = storage_->Shape();
-//     return std::accumulate(shape.begin(), shape.end(), 1LL, std::multiplies<int64_t>());
-//   }
-
-//   std::string Shape2Str() const {
-//     if (storage_->IsInitialized()) {
-//       std::string shape_str;
-//       auto& shape = storage_->Shape();
-//       for (const auto& dim : shape) {
-//         shape_str.append(std::to_string(dim));
-//         shape_str.append(", ");
-//       }
-//       return shape_str;
-//     } else {
-//       return "empty";
-//     }
-//   }
-
-//   const void* DataRaw() const override {
-//     return storage_->DataRaw();
-//   }
-
-//   size_t SizeInBytes() const override {
-//     return 0;
-//   }
-
-//   std::byte* AllocateRaw(const std::vector<int64_t>& shape) override {
-//     ORTX_CXX_API_THROW("AllocateRaw() not supported for string tensor", ORT_RUNTIME_EXCEPTION);
-//   }
-//  private:
-//   std::unique_ptr<IStringTensorStorage<TensorVector>> storage_;
-// };
-
-// using TensorSequence = Tensor<std::vector<TensorBase*>>;
-
 template<typename ...Args>
 class NamedArgumentDict{
  public:
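Aside: the deleted draft's NumberOfElement() shows the standard fold-the-shape idiom, multiplying all dimensions together with std::accumulate. A self-contained sketch of that pattern, written as a free function invented here purely for illustration (it is not part of the library API):

    #include <cstdint>     // std::int64_t
    #include <functional>  // std::multiplies
    #include <numeric>     // std::accumulate
    #include <vector>

    // Hypothetical free-function version of the removed NumberOfElement():
    // multiply all dimensions together; the seed of 1 makes an empty shape
    // (i.e. a scalar) report exactly one element.
    std::int64_t NumberOfElement(const std::vector<std::int64_t>& shape) {
      return std::accumulate(shape.begin(), shape.end(), std::int64_t{1},
                             std::multiplies<std::int64_t>());
    }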
17 changes: 1 addition & 16 deletions shared/api/runner.hpp
@@ -270,7 +270,6 @@ class Operation {
   }
 
   void ResetTensors(ortc::IAllocator* allocator) { outputs_.clear(); }
-  bool IsSequenceOnly() { return false; }
 
  private:
   std::vector<std::unique_ptr<ortc::TensorBase>> outputs_;
@@ -308,10 +307,6 @@ class OrtxRunner {
         return status;
       }
 
-      if (op->IsSequenceOnly()) {
-        break;
-      }
-
       last_op = op;
     }
   }
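With the IsSequenceOnly() escape hatch gone, the loop above simply runs every operation in order and propagates the first failure. A hedged skeleton of that control flow, using invented stand-in names (Op, Status, RunAll) rather than the real Operation/OrtxStatus types:

    #include <memory>
    #include <vector>

    struct Status { bool ok = true; };            // stand-in for the real status type
    struct Op { Status Apply() { return {}; } };  // stand-in for Operation

    Status RunAll(std::vector<std::unique_ptr<Op>>& ops) {
      Op* last_op = nullptr;
      for (auto& op : ops) {
        Status status = op->Apply();
        if (!status.ok) {
          return status;     // first failure wins, as in the hunk above
        }
        last_op = op.get();  // no sequence-only early break any more
      }
      (void)last_op;         // kept only to mirror the last_op bookkeeping above
      return {};
    }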
@@ -375,7 +370,7 @@ class OrtxRunner {
                      element_size);
         }
       } else {
-        memset(dest + i * dest_chunk_size * element_size, 0, dest_chunk_size * element_size);
+        std::memset(dest + i * dest_chunk_size * element_size, 0, dest_chunk_size * element_size);
       }
     }
   }
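A note on the one-line fix above: the header <cstring> is only guaranteed to declare std::memset; whether the unqualified global ::memset is also visible is left to the implementation, so the qualified call is the portable spelling. A minimal sketch, assuming a raw byte buffer like the one the padding branch zeroes (ZeroChunk is a name invented for this sketch):

    #include <cstddef>  // std::byte, std::size_t
    #include <cstring>  // declares std::memset; global ::memset is not guaranteed

    // Zero one destination chunk, as the padding branch above does.
    void ZeroChunk(std::byte* dest, std::size_t chunk_bytes) {
      std::memset(dest, 0, chunk_bytes);
    }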
@@ -409,16 +404,6 @@ class OrtxRunner {
     }
 
     std::vector<int64_t> output_shape = shape;
-    // if (!is_same_shape) {
-    //   if (ts_ptrs.front()->Type() != ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64) {
-    //     return {kOrtxErrorInvalidArgument, "[StackTensors]: shapes of tensors to stack are not the same."};
-    //   } else {
-    //     // if the shape is not the same, but the type is int64, let's pad the shape before the stack
-    //     // since shape is already the max shape, we don't need to do anything here
-    //     ;
-    //   }
-    // }
-
     output_shape.insert(output_shape.begin(), batch_size);
     std::byte* tensor_buf = outputs[axis]->AllocateRaw(output_shape);
     auto ts_size = outputs[axis]->SizeInBytes() / batch_size;
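The surviving lines show the stacking recipe: prepend batch_size to the per-element shape, allocate one contiguous buffer with AllocateRaw, and give each input a SizeInBytes()/batch_size slot. A simplified sketch of that memory layout under the assumption that all inputs share a shape (StackSameShape and tensor_bytes are invented names):

    #include <cstddef>
    #include <cstring>  // std::memcpy
    #include <vector>

    // Copy N equally sized tensors into one contiguous [N, ...] buffer,
    // mirroring the AllocateRaw / SizeInBytes arithmetic in the hunk above.
    std::vector<std::byte> StackSameShape(const std::vector<const std::byte*>& tensors,
                                          std::size_t tensor_bytes) {
      std::vector<std::byte> batched(tensors.size() * tensor_bytes);
      for (std::size_t i = 0; i < tensors.size(); ++i) {
        std::memcpy(batched.data() + i * tensor_bytes, tensors[i], tensor_bytes);
      }
      return batched;
    }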
