From 2d089f850f551feaaddde76157ba8c611c0ee1a4 Mon Sep 17 00:00:00 2001 From: misterpoe Date: Fri, 5 Aug 2016 14:04:25 -0700 Subject: [PATCH 01/22] Node Tracing Controller * Add Node changes * Experimental: separate thread for tracing + two TraceBuffers * Combine two TraceBuffers into single one * Implement src/trace-event.h in Node --- node.gyp | 6 + src/env-inl.h | 1 + src/env.h | 6 + src/node.cc | 14 + src/node.h | 1 + src/tracing/agent.cc | 94 +++++ src/tracing/agent.h | 42 ++ src/tracing/node_trace_buffer.cc | 125 ++++++ src/tracing/node_trace_buffer.h | 73 ++++ src/tracing/node_trace_writer.cc | 104 +++++ src/tracing/node_trace_writer.h | 51 +++ src/tracing/trace_config_parser.cc | 109 ++++++ src/tracing/trace_config_parser.h | 49 +++ src/tracing/trace_event.cc | 17 + src/tracing/trace_event.h | 589 +++++++++++++++++++++++++++++ 15 files changed, 1281 insertions(+) create mode 100644 src/tracing/agent.cc create mode 100644 src/tracing/agent.h create mode 100644 src/tracing/node_trace_buffer.cc create mode 100644 src/tracing/node_trace_buffer.h create mode 100644 src/tracing/node_trace_writer.cc create mode 100644 src/tracing/node_trace_writer.h create mode 100644 src/tracing/trace_config_parser.cc create mode 100644 src/tracing/trace_config_parser.h create mode 100644 src/tracing/trace_event.cc create mode 100644 src/tracing/trace_event.h diff --git a/node.gyp b/node.gyp index 8f5510d44c7366..31c1a79719c18c 100644 --- a/node.gyp +++ b/node.gyp @@ -136,10 +136,16 @@ 'src', 'tools/msvs/genfiles', 'deps/uv/src/ares', + 'deps/v8/base/trace_event/common', '<(SHARED_INTERMEDIATE_DIR)', # for node_natives.h ], 'sources': [ + 'src/tracing/agent.cc', + 'src/tracing/node_trace_buffer.cc', + 'src/tracing/node_trace_writer.cc', + 'src/tracing/trace_config_parser.cc', + 'src/tracing/trace_event.cc', 'src/debug-agent.cc', 'src/async-wrap.cc', 'src/env.cc', diff --git a/src/env-inl.h b/src/env-inl.h index 4b43057de33f6a..16b40b0b3a40c0 100644 --- a/src/env-inl.h +++ b/src/env-inl.h @@ -176,6 +176,7 @@ inline Environment::Environment(IsolateData* isolate_data, #if HAVE_INSPECTOR inspector_agent_(this), #endif + tracing_agent_(this), handle_cleanup_waiting_(0), http_parser_buffer_(nullptr), context_(context->GetIsolate(), context) { diff --git a/src/env.h b/src/env.h index 8c256ca9c7faa4..76ff5783fb99e4 100644 --- a/src/env.h +++ b/src/env.h @@ -8,6 +8,7 @@ #if HAVE_INSPECTOR #include "inspector_agent.h" #endif +#include "tracing/agent.h" #include "handle_wrap.h" #include "req-wrap.h" #include "tree.h" @@ -537,6 +538,10 @@ class Environment { } #endif + inline tracing::Agent* tracing_agent() { + return &tracing_agent_; + } + typedef ListHead HandleWrapQueue; typedef ListHead, &ReqWrap::req_wrap_queue_> ReqWrapQueue; @@ -574,6 +579,7 @@ class Environment { #if HAVE_INSPECTOR inspector::Agent inspector_agent_; #endif + tracing::Agent tracing_agent_; HandleWrapQueue handle_wrap_queue_; ReqWrapQueue req_wrap_queue_; diff --git a/src/node.cc b/src/node.cc index 3147a90d3e3844..ad1453d462bab7 100644 --- a/src/node.cc +++ b/src/node.cc @@ -150,6 +150,8 @@ static node_module* modpending; static node_module* modlist_builtin; static node_module* modlist_linked; static node_module* modlist_addon; +static bool trace_enabled = false; +static const char* trace_config_file = nullptr; #if defined(NODE_HAVE_I18N_SUPPORT) // Path to ICU data (for i18n / Intl) @@ -194,6 +196,7 @@ static struct { void Initialize(int thread_pool_size) { platform_ = v8::platform::CreateDefaultPlatform(thread_pool_size); 
 V8::InitializePlatform(platform_);
+  tracing::TraceEventHelper::SetCurrentPlatform(platform_);
 }

 void PumpMessageLoop(Isolate* isolate) {
@@ -2204,6 +2207,7 @@ static void WaitForInspectorDisconnect(Environment* env) {

 void Exit(const FunctionCallbackInfo<Value>& args) {
   WaitForInspectorDisconnect(Environment::GetCurrent(args));
+  Environment::GetCurrent(args)->tracing_agent()->Stop();
   exit(args[0]->Int32Value());
 }

@@ -3626,6 +3630,10 @@ static void ParseArgs(int* argc,
       trace_deprecation = true;
     } else if (strcmp(arg, "--trace-sync-io") == 0) {
       trace_sync_io = true;
+    } else if (strcmp(arg, "--enable-tracing") == 0) {
+      trace_enabled = true;
+    } else if (strncmp(arg, "--trace-config=", 15) == 0) {
+      trace_config_file = arg + 15;
     } else if (strcmp(arg, "--track-heap-objects") == 0) {
       track_heap_objects = true;
     } else if (strcmp(arg, "--throw-deprecation") == 0) {
@@ -4332,6 +4340,11 @@ inline int Start(Isolate* isolate, IsolateData* isolate_data,
     return 12;  // Signal internal error.
   }

+  // Enable tracing when argv has --enable-tracing.
+  if (trace_enabled) {
+    env.tracing_agent()->Start(v8_platform.platform_, trace_config_file);
+  }
+
   {
     Environment::AsyncCallbackScope callback_scope(&env);
     LoadEnvironment(&env);
@@ -4369,6 +4382,7 @@ inline int Start(Isolate* isolate, IsolateData* isolate_data,

   RunAtExit(&env);
   WaitForInspectorDisconnect(&env);
+  env.tracing_agent()->Stop();
 #if defined(LEAK_SANITIZER)
   __lsan_do_leak_check();
 #endif
diff --git a/src/node.h b/src/node.h
index 873551fa338a97..098339da1a2b9e 100644
--- a/src/node.h
+++ b/src/node.h
@@ -41,6 +41,7 @@

 #include "v8.h"  // NOLINT(build/include_order)
 #include "node_version.h"  // NODE_MODULE_VERSION
+#include "tracing/trace_event.h"

 #define NODE_MAKE_VERSION(major, minor, patch)                                \
   ((major) * 0x1000 + (minor) * 0x100 + (patch))
diff --git a/src/tracing/agent.cc b/src/tracing/agent.cc
new file mode 100644
index 00000000000000..cb9d419ba8aa9d
--- /dev/null
+++ b/src/tracing/agent.cc
@@ -0,0 +1,94 @@
+#include "tracing/agent.h"
+
+#include <fstream>
+#include <string>
+
+#include "env-inl.h"
+#include "libplatform/libplatform.h"
+#include "tracing/trace_config_parser.h"
+
+namespace node {
+namespace tracing {
+
+Agent::Agent(Environment* env) : parent_env_(env) {}
+
+void Agent::Start(v8::Platform* platform, const char* trace_config_file) {
+  auto env = parent_env_;
+  platform_ = platform;
+
+  int err = uv_loop_init(&child_loop_);
+  CHECK_EQ(err, 0);
+
+  NodeTraceWriter* trace_writer = new NodeTraceWriter(&child_loop_);
+  TraceBuffer* trace_buffer = new NodeTraceBuffer(
+      NodeTraceBuffer::kBufferChunks, trace_writer, this);
+
+  flush_signal_.data = trace_buffer;
+  err = uv_async_init(&child_loop_, &flush_signal_, FlushSignalCb);
+  CHECK_EQ(err, 0);
+
+  tracing_controller_ = new TracingController();
+
+  TraceConfig* trace_config = new TraceConfig();
+  if (trace_config_file) {
+    std::ifstream fin(trace_config_file);
+    std::string str((std::istreambuf_iterator<char>(fin)),
+                    std::istreambuf_iterator<char>());
+    TraceConfigParser::FillTraceConfig(env->isolate(), trace_config,
+                                       str.c_str());
+  } else {
+    trace_config->AddIncludedCategory("v8");
+    trace_config->AddIncludedCategory("node");
+  }
+  tracing_controller_->Initialize(trace_buffer);
+  tracing_controller_->StartTracing(trace_config);
+  v8::platform::SetTracingController(platform, tracing_controller_);
+
+  err = uv_thread_create(&thread_, ThreadCb, this);
+  CHECK_EQ(err, 0);
+}
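The `--trace-config` file read above is plain JSON, parsed by the `TraceConfigParser` added later in this patch. A hypothetical `trace-config.json` (the file name is illustrative) using the keys the parser understands — `record_mode`, `enable_sampling`, `enable_systrace`, `enable_argument_filter`, `included_categories`, `excluded_categories` — might look like:

```json
{
  "record_mode": "record-continuously",
  "enable_sampling": false,
  "included_categories": ["v8", "node"],
  "excluded_categories": []
}
```

It would be passed on the command line as `node --enable-tracing --trace-config=trace-config.json app.js`.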
+
+void Agent::Stop() {
+  if (!IsStarted()) {
+    return;
+  }
+  // Perform a final Flush on the TraceBuffer. We don't want the tracing
+  // controller to flush the buffer again on destruction of the V8::Platform.
+
+  // TODO: The serialization of this Flush is currently done on the main
+  // thread and handed to a libuv file write on the child thread later.
+  // We might want to perform the serialization on the child thread instead,
+  // but the current TracingController does not allow StopTracing() to be
+  // overridden. Because serialization happens on the main thread,
+  // NodeTraceWriter::IsReady() is now accessed from multiple threads, and
+  // there may be race conditions if the child thread is still processing
+  // work at that moment. This could also be resolved by ensuring that the
+  // child thread is idle before stopping tracing.
+  tracing_controller_->StopTracing();
+  delete tracing_controller_;
+  v8::platform::SetTracingController(platform_, nullptr);
+  // TODO: We might need to clean up the loop properly upon exit:
+  // 1. Close child_loop_.
+  // 2. Close flush_signal_.
+  // 3. Destroy thread_.
+}
+
+// static
+void Agent::ThreadCb(void* agent) {
+  static_cast<Agent*>(agent)->WorkerRun();
+}
+
+void Agent::WorkerRun() {
+  uv_run(&child_loop_, UV_RUN_DEFAULT);
+}
+
+// static
+void Agent::FlushSignalCb(uv_async_t* signal) {
+  static_cast<TraceBuffer*>(signal->data)->Flush();
+}
+
+void Agent::SendFlushSignal() {
+  int err = uv_async_send(&flush_signal_);
+  CHECK_EQ(err, 0);
+}
+
+}  // namespace tracing
+}  // namespace node
diff --git a/src/tracing/agent.h b/src/tracing/agent.h
new file mode 100644
index 00000000000000..ba96b25734f8cd
--- /dev/null
+++ b/src/tracing/agent.h
@@ -0,0 +1,42 @@
+#ifndef SRC_TRACING_AGENT_H_
+#define SRC_TRACING_AGENT_H_
+
+#include "tracing/node_trace_buffer.h"
+#include "tracing/node_trace_writer.h"
+#include "uv.h"
+#include "v8.h"
+
+// Forward declaration to break the recursive dependency chain with src/env.h.
+namespace node {
+class Environment;
+}  // namespace node
+
+namespace node {
+namespace tracing {
+
+class Agent {
+ public:
+  explicit Agent(Environment* env);
+  void Start(v8::Platform* platform, const char* trace_config_file);
+  void Stop();
+  void SendFlushSignal();
+
+ private:
+  static void ThreadCb(void* agent);
+  static void FlushSignalCb(uv_async_t* signal);
+
+  bool IsStarted() { return platform_ != nullptr; }
+  void WorkerRun();
+
+  v8::Platform* platform_ = nullptr;
+  Environment* parent_env_;
+  uv_thread_t thread_;
+  uv_loop_t child_loop_;
+  uv_async_t flush_signal_;
+  TracingController* tracing_controller_;
+};
+
+}  // namespace tracing
+}  // namespace node
+
+#endif  // SRC_TRACING_AGENT_H_
diff --git a/src/tracing/node_trace_buffer.cc b/src/tracing/node_trace_buffer.cc
new file mode 100644
index 00000000000000..8a2e1d4691447d
--- /dev/null
+++ b/src/tracing/node_trace_buffer.cc
@@ -0,0 +1,125 @@
+#include "tracing/node_trace_buffer.h"
+
+#include "tracing/agent.h"
+
+namespace node {
+namespace tracing {
+
+const double InternalTraceBuffer::kFlushThreshold = 0.75;
+
+InternalTraceBuffer::InternalTraceBuffer(size_t max_chunks,
+    NodeTraceWriter* trace_writer, Agent* agent)
+    : max_chunks_(max_chunks), trace_writer_(trace_writer), agent_(agent) {
+  chunks_.resize(max_chunks);
+}
+
+TraceObject* InternalTraceBuffer::AddTraceEvent(uint64_t* handle) {
+  Mutex::ScopedLock scoped_lock(mutex_);
+  // If buffer usage exceeds kFlushThreshold, attempt to perform a flush.
+  // TODO: This threshold should be configurable.
+  if (total_chunks_ >= max_chunks_ * kFlushThreshold) {
+    // Tell the tracing agent thread to perform a Flush on this buffer.
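The flush signal used here is libuv's standard cross-thread wake-up: `uv_async_send()` may be called from any thread, and the callback runs on whichever thread is driving the loop. A standalone sketch of the pattern, independent of the patch:

```cpp
// Minimal libuv wake-up pattern: a producer thread requests work that is
// then executed on the thread running the event loop.
#include <assert.h>
#include <stdio.h>
#include <uv.h>

static void FlushCb(uv_async_t* handle) {
  printf("flush requested from another thread\n");
  uv_close(reinterpret_cast<uv_handle_t*>(handle), nullptr);  // let loop exit
}

static void Producer(void* arg) {
  // Note: several sends may be coalesced into a single callback invocation,
  // which is why a busy writer can "ignore" extra signals.
  uv_async_send(static_cast<uv_async_t*>(arg));
}

int main() {
  uv_loop_t loop;
  uv_async_t flush_signal;
  assert(uv_loop_init(&loop) == 0);
  assert(uv_async_init(&loop, &flush_signal, FlushCb) == 0);
  uv_thread_t producer;
  uv_thread_create(&producer, Producer, &flush_signal);
  uv_run(&loop, UV_RUN_DEFAULT);
  uv_thread_join(&producer);
  uv_loop_close(&loop);
}
```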
+ // Note that the signal might be ignored if the writer is busy now. + agent_->SendFlushSignal(); + } + // Create new chunk if last chunk is full or there is no chunk. + if (total_chunks_ == 0 || chunks_[total_chunks_ - 1]->IsFull()) { + if (total_chunks_ == max_chunks_) { + // There is no more space to store more trace events. + *handle = MakeHandle(0, 0, 0); + return nullptr; + } + auto& chunk = chunks_[total_chunks_++]; + if (chunk) { + chunk->Reset(current_chunk_seq_++); + } else { + chunk.reset(new TraceBufferChunk(current_chunk_seq_++)); + } + } + auto& chunk = chunks_[total_chunks_ - 1]; + size_t event_index; + TraceObject* trace_object = chunk->AddTraceEvent(&event_index); + *handle = MakeHandle(total_chunks_ - 1, chunk->seq(), event_index); + return trace_object; +} + +TraceObject* InternalTraceBuffer::GetEventByHandle(uint64_t handle) { + Mutex::ScopedLock scoped_lock(mutex_); + size_t chunk_index, event_index; + uint32_t chunk_seq; + ExtractHandle(handle, &chunk_index, &chunk_seq, &event_index); + if (chunk_index >= total_chunks_) { + return NULL; + } + auto& chunk = chunks_[chunk_index]; + if (chunk->seq() != chunk_seq) { + return NULL; + } + return chunk->GetEventAt(event_index); +} + +void InternalTraceBuffer::Flush() { + for (size_t i = 0; i < total_chunks_; ++i) { + auto& chunk = chunks_[i]; + for (size_t j = 0; j < chunk->size(); ++j) { + trace_writer_->AppendTraceEvent(chunk->GetEventAt(j)); + } + } + trace_writer_->Flush(); + total_chunks_ = 0; +} + +uint64_t InternalTraceBuffer::MakeHandle( + size_t chunk_index, uint32_t chunk_seq, size_t event_index) const { + return static_cast(chunk_seq) * Capacity() + + chunk_index * TraceBufferChunk::kChunkSize + event_index; +} + +void InternalTraceBuffer::ExtractHandle( + uint64_t handle, size_t* chunk_index, + uint32_t* chunk_seq, size_t* event_index) const { + *chunk_seq = static_cast(handle / Capacity()); + size_t indices = handle % Capacity(); + *chunk_index = indices / TraceBufferChunk::kChunkSize; + *event_index = indices % TraceBufferChunk::kChunkSize; +} + +NodeTraceBuffer::NodeTraceBuffer(size_t max_chunks, + NodeTraceWriter* trace_writer, Agent* agent) + : trace_writer_(trace_writer), + buffer1_(max_chunks, trace_writer, agent), + buffer2_(max_chunks, trace_writer, agent) { + current_buf_.store(&buffer1_); +} + +TraceObject* NodeTraceBuffer::AddTraceEvent(uint64_t* handle) { + return current_buf_.load()->AddTraceEvent(handle); +} + +TraceObject* NodeTraceBuffer::GetEventByHandle(uint64_t handle) { + return current_buf_.load()->GetEventByHandle(handle); +} + +bool NodeTraceBuffer::Flush() { + // This function should mainly be called from the tracing agent thread. + // TODO: However, it could be called from the main thread, when + // the tracing controller stops tracing. + // Ideally we would want all Flushes to be done on the tracing thread, to + // prevent concurrent access of trace_writer_. + if (!trace_writer_->IsReady()) { + return false; + } + InternalTraceBuffer* prev_buf = current_buf_.load(); + if (prev_buf == &buffer1_) { + current_buf_.store(&buffer2_); + } else { + current_buf_.store(&buffer1_); + } + // Flush the other buffer. + // Note that concurrently, we can AddTraceEvent to the current buffer. 
+  prev_buf->Flush();
+  return true;
+}
+
+}  // namespace tracing
+}  // namespace node
diff --git a/src/tracing/node_trace_buffer.h b/src/tracing/node_trace_buffer.h
new file mode 100644
index 00000000000000..97678070333151
--- /dev/null
+++ b/src/tracing/node_trace_buffer.h
@@ -0,0 +1,73 @@
+#ifndef SRC_NODE_TRACE_BUFFER_H_
+#define SRC_NODE_TRACE_BUFFER_H_
+
+#include "node_mutex.h"
+#include "tracing/node_trace_writer.h"
+#include "libplatform/v8-tracing.h"
+
+#include <atomic>
+#include <memory>
+#include <vector>
+
+// Forward declaration to break the recursive dependency chain with
+// tracing/agent.h.
+namespace node {
+namespace tracing {
+class Agent;
+}  // namespace tracing
+}  // namespace node
+
+namespace node {
+namespace tracing {
+
+using v8::platform::tracing::TraceBuffer;
+using v8::platform::tracing::TraceBufferChunk;
+using v8::platform::tracing::TraceObject;
+
+class InternalTraceBuffer {
+ public:
+  InternalTraceBuffer(size_t max_chunks, NodeTraceWriter* trace_writer,
+                      Agent* agent);
+
+  TraceObject* AddTraceEvent(uint64_t* handle);
+  TraceObject* GetEventByHandle(uint64_t handle);
+  void Flush();
+
+  static const double kFlushThreshold;
+
+ private:
+  uint64_t MakeHandle(size_t chunk_index, uint32_t chunk_seq,
+                      size_t event_index) const;
+  void ExtractHandle(uint64_t handle, size_t* chunk_index,
+                     uint32_t* chunk_seq, size_t* event_index) const;
+  size_t Capacity() const {
+    return max_chunks_ * TraceBufferChunk::kChunkSize;
+  }
+
+  Mutex mutex_;
+  size_t max_chunks_;
+  NodeTraceWriter* trace_writer_;
+  Agent* agent_;
+  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
+  size_t total_chunks_ = 0;
+  uint32_t current_chunk_seq_ = 1;
+};
+
+class NodeTraceBuffer : public TraceBuffer {
+ public:
+  NodeTraceBuffer(size_t max_chunks, NodeTraceWriter* trace_writer,
+                  Agent* agent);
+
+  TraceObject* AddTraceEvent(uint64_t* handle) override;
+  TraceObject* GetEventByHandle(uint64_t handle) override;
+  bool Flush() override;
+
+  static const size_t kBufferChunks = 1024;
+
+ private:
+  std::unique_ptr<NodeTraceWriter> trace_writer_;
+  // TODO: Change std::atomic to something less contentious.
+  std::atomic<InternalTraceBuffer*> current_buf_;
+  InternalTraceBuffer buffer1_;
+  InternalTraceBuffer buffer2_;
+};
+
+}  // namespace tracing
+}  // namespace node
+
+#endif  // SRC_NODE_TRACE_BUFFER_H_
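The event handles declared above pack three coordinates into one `uint64_t` using mixed-radix arithmetic rather than bit fields. A worked round trip, assuming `TraceBufferChunk::kChunkSize == 64` and `max_chunks == 1024` (so `Capacity() == 65536`; the scheme works for any such constants):

```cpp
// Round-trip sketch of MakeHandle/ExtractHandle from InternalTraceBuffer.
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t kChunkSize = 64;      // assumed, mirrors TraceBufferChunk
constexpr size_t kMaxChunks = 1024;    // assumed, mirrors kBufferChunks
constexpr uint64_t kCapacity = kChunkSize * kMaxChunks;

constexpr uint64_t MakeHandle(size_t chunk_index, uint32_t chunk_seq,
                              size_t event_index) {
  // chunk_seq selects a "generation"; index and slot are packed below it,
  // so a stale handle (old seq) can be detected after a chunk is reused.
  return static_cast<uint64_t>(chunk_seq) * kCapacity +
         chunk_index * kChunkSize + event_index;
}

int main() {
  uint64_t h = MakeHandle(5, 3, 7);  // == 3 * 65536 + 5 * 64 + 7 == 196935
  uint32_t seq = static_cast<uint32_t>(h / kCapacity);
  size_t indices = h % kCapacity;
  assert(seq == 3);
  assert(indices / kChunkSize == 5);
  assert(indices % kChunkSize == 7);
}
```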
diff --git a/src/tracing/node_trace_writer.cc b/src/tracing/node_trace_writer.cc
new file mode 100644
index 00000000000000..0bc61ad86e610c
--- /dev/null
+++ b/src/tracing/node_trace_writer.cc
@@ -0,0 +1,104 @@
+#include "tracing/node_trace_writer.h"
+
+#include <fcntl.h>
+#include <string.h>
+#include <string>
+
+#include "trace_event_common.h"
+#include "util.h"
+
+namespace node {
+namespace tracing {
+
+NodeTraceWriter::NodeTraceWriter(uv_loop_t* tracing_loop)
+    : tracing_loop_(tracing_loop) {
+  write_req_.writer = this;
+}
+
+void NodeTraceWriter::WriteSuffix() {
+  // If our final log file has traces, then end the file appropriately.
+  // This means that if no trace events are recorded, then no trace file is
+  // produced.
+  if (total_traces_ > 0) {
+    WriteToFile("]}\n");
+  }
+}
+
+NodeTraceWriter::~NodeTraceWriter() {
+  WriteSuffix();
+  uv_fs_t req;
+  int err = uv_fs_close(tracing_loop_, &req, fd_, nullptr);
+  CHECK_EQ(err, 0);
+  uv_fs_req_cleanup(&req);
+  uv_close(reinterpret_cast<uv_handle_t*>(&trace_file_pipe_), nullptr);
+}
+
+void NodeTraceWriter::OpenNewFileForStreaming() {
+  ++file_num_;
+  uv_fs_t req;
+  std::string log_file = "node_trace.log." + std::to_string(file_num_);
+  fd_ = uv_fs_open(tracing_loop_, &req, log_file.c_str(),
+                   O_CREAT | O_WRONLY | O_TRUNC, 0644, NULL);
+  uv_fs_req_cleanup(&req);
+  uv_pipe_init(tracing_loop_, &trace_file_pipe_, 0);
+  uv_pipe_open(&trace_file_pipe_, fd_);
+  // Note that the following does not get flushed to the file immediately.
+  stream_ << "{\"traceEvents\":[";
+}
+
+void NodeTraceWriter::AppendTraceEvent(TraceObject* trace_event) {
+  // Set the is_writing_ flag here so that we only do a single Flush at any
+  // one point in time, because the stream_ must stay alive while we are
+  // flushing.
+  is_writing_ = true;
+  // If this is the first trace event, open a new file for streaming.
+  if (total_traces_ == 0) {
+    OpenNewFileForStreaming();
+  } else {
+    stream_ << ",\n";
+  }
+  ++total_traces_;
+  stream_ << "{\"pid\":" << trace_event->pid()
+          << ",\"tid\":" << trace_event->tid()
+          << ",\"ts\":" << trace_event->ts()
+          << ",\"tts\":" << trace_event->tts() << ",\"ph\":\""
+          << trace_event->phase() << "\",\"cat\":\""
+          << TracingController::GetCategoryGroupName(
+                 trace_event->category_enabled_flag())
+          << "\",\"name\":\"" << trace_event->name()
+          << "\",\"args\":{},\"dur\":" << trace_event->duration()
+          << ",\"tdur\":" << trace_event->cpu_duration();
+  if (trace_event->flags() & TRACE_EVENT_FLAG_HAS_ID) {
+    if (trace_event->scope() != nullptr) {
+      stream_ << ",\"scope\":\"" << trace_event->scope() << "\"";
+    }
+    stream_ << ",\"id\":" << trace_event->id();
+  }
+  stream_ << "}";
+}
+
+void NodeTraceWriter::Flush() {
+  if (total_traces_ >= kTracesPerFile) {
+    total_traces_ = 0;
+    stream_ << "]}\n";
+  }
+  WriteToFile(stream_.str().c_str());
+}
+
+void NodeTraceWriter::WriteToFile(const char* str) {
+  uv_buf_t uv_buf = uv_buf_init(const_cast<char*>(str), strlen(str));
+  // We can reuse write_req_ here because we are assured that no other
+  // uv_write using write_req_ is ongoing. In the case of synchronous writes
+  // where the callback has not been fired, it is unclear whether it is OK
+  // to reuse write_req_.
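The `uv_write` call below leans on a common libuv idiom: `WriteRequest` embeds `uv_write_t` as its first member, so the `uv_write_t*` handed to the completion callback can be cast back to the enclosing struct to recover the writer. A minimal sketch of that pattern:

```cpp
// Request-embedding pattern: uv_write_t must stay the first member so the
// pointer libuv hands back can be reinterpreted as the wrapper struct.
#include <uv.h>

class NodeTraceWriter;  // stand-in for the real class

struct WriteRequest {
  uv_write_t req;           // first member: &wrapper == &wrapper.req
  NodeTraceWriter* writer;  // extra context carried alongside the request
};

void WriteCb(uv_write_t* req, int status) {
  (void)status;
  WriteRequest* write_req = reinterpret_cast<WriteRequest*>(req);
  (void)write_req->writer;  // now we know which writer issued the request
}
```

An alternative with the same effect is libuv's `req->data` field; embedding avoids the extra pointer hop and an allocation per request.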
+  uv_write(reinterpret_cast<uv_write_t*>(&write_req_),
+           reinterpret_cast<uv_stream_t*>(&trace_file_pipe_),
+           &uv_buf, 1, WriteCb);
+}
+
+void NodeTraceWriter::WriteCb(uv_write_t* req, int status) {
+  NodeTraceWriter* writer = reinterpret_cast<WriteRequest*>(req)->writer;
+  writer->stream_.str("");
+  writer->is_writing_ = false;
+}
+
+}  // namespace tracing
+}  // namespace node
diff --git a/src/tracing/node_trace_writer.h b/src/tracing/node_trace_writer.h
new file mode 100644
index 00000000000000..30e12112bbc795
--- /dev/null
+++ b/src/tracing/node_trace_writer.h
@@ -0,0 +1,51 @@
+#ifndef SRC_NODE_TRACE_WRITER_H_
+#define SRC_NODE_TRACE_WRITER_H_
+
+#include <sstream>
+
+#include "libplatform/v8-tracing.h"
+#include "uv.h"
+
+namespace node {
+namespace tracing {
+
+using v8::platform::tracing::TraceObject;
+using v8::platform::tracing::TraceWriter;
+using v8::platform::tracing::TracingController;
+
+class NodeTraceWriter : public TraceWriter {
+ public:
+  explicit NodeTraceWriter(uv_loop_t* tracing_loop);
+  ~NodeTraceWriter();
+
+  bool IsReady() { return !is_writing_; }
+  void AppendTraceEvent(TraceObject* trace_event) override;
+  void Flush() override;
+
+  static const int kTracesPerFile = 1 << 20;
+
+ private:
+  struct WriteRequest {
+    uv_write_t req;
+    NodeTraceWriter* writer;
+  };
+
+  static void WriteCb(uv_write_t* req, int status);
+  void OpenNewFileForStreaming();
+  void WriteToFile(const char* str);
+  void WriteSuffix();
+
+  uv_loop_t* tracing_loop_;
+  int fd_;
+  int total_traces_ = 0;
+  int file_num_ = 0;
+  WriteRequest write_req_;
+  uv_pipe_t trace_file_pipe_;
+  bool is_writing_ = false;
+  std::ostringstream stream_;
+};
+
+}  // namespace tracing
+}  // namespace node
+
+#endif  // SRC_NODE_TRACE_WRITER_H_
\ No newline at end of file
diff --git a/src/tracing/trace_config_parser.cc b/src/tracing/trace_config_parser.cc
new file mode 100644
index 00000000000000..626eeec81f0576
--- /dev/null
+++ b/src/tracing/trace_config_parser.cc
@@ -0,0 +1,109 @@
+#include "tracing/trace_config_parser.h"
+
+#include <string.h>
+
+namespace node {
+namespace tracing {
+
+// String options that can be used to initialize TraceOptions.
+const char* kRecordUntilFull = "record-until-full";
+const char* kRecordContinuously = "record-continuously";
+const char* kRecordAsMuchAsPossible = "record-as-much-as-possible";
+
+const char* kRecordModeParam = "record_mode";
+const char* kEnableSamplingParam = "enable_sampling";
+const char* kEnableSystraceParam = "enable_systrace";
+const char* kEnableArgumentFilterParam = "enable_argument_filter";
+const char* kIncludedCategoriesParam = "included_categories";
+const char* kExcludedCategoriesParam = "excluded_categories";
+
+void TraceConfigParser::FillTraceConfig(
+    Isolate* isolate, TraceConfig* trace_config, const char* json_str) {
+  HandleScope handle_scope(isolate);
+  Local<Context> context = Context::New(isolate);
+  Context::Scope context_scope(context);
+
+  Local<String> source =
+      String::NewFromUtf8(isolate, json_str, NewStringType::kNormal)
+          .ToLocalChecked();
+  Local<Value> result = JSON::Parse(context, source).ToLocalChecked();
+  Local<Object> trace_config_object = Local<Object>::Cast(result);
+
+  trace_config->SetTraceRecordMode(
+      GetTraceRecordMode(isolate, context, trace_config_object));
+  if (GetBoolean(isolate, context, trace_config_object, kEnableSamplingParam))
+    trace_config->EnableSampling();
+  if (GetBoolean(isolate, context, trace_config_object, kEnableSystraceParam))
+    trace_config->EnableSystrace();
+  if (GetBoolean(
+          isolate, context, trace_config_object, kEnableArgumentFilterParam))
+    trace_config->EnableArgumentFilter();
+  UpdateCategoriesList(isolate, context, trace_config_object,
+                       kIncludedCategoriesParam, trace_config);
+  UpdateCategoriesList(isolate, context, trace_config_object,
+                       kExcludedCategoriesParam, trace_config);
+}
+
+bool TraceConfigParser::GetBoolean(Isolate* isolate, Local<Context> context,
+                                   Local<Object> object,
+                                   const char* property) {
+  Local<Value> value = GetValue(isolate, context, object, property);
+  if (value->IsBoolean()) {
+    Local<Boolean> v8_boolean = value->ToBoolean(context).ToLocalChecked();
+    return v8_boolean->Value();
+  }
+  return false;
+}
+
+int TraceConfigParser::UpdateCategoriesList(
+    Isolate* isolate, Local<Context> context, Local<Object> object,
+    const char* property, TraceConfig* trace_config) {
+  Local<Value> value = GetValue(isolate, context, object, property);
+  if (value->IsArray()) {
+    Local<Array> v8_array = Local<Array>::Cast(value);
+    for (int i = 0, length = v8_array->Length(); i < length; ++i) {
+      Local<String> v = v8_array->Get(context, i)
+                            .ToLocalChecked()
+                            ->ToString(context)
+                            .ToLocalChecked();
+      String::Utf8Value str(v);
+      if (property == kIncludedCategoriesParam) {
+        trace_config->AddIncludedCategory(*str);
+      } else if (property == kExcludedCategoriesParam) {
+        trace_config->AddExcludedCategory(*str);
+      }
+    }
+    return v8_array->Length();
+  }
+  return 0;
+}
+
+TraceRecordMode TraceConfigParser::GetTraceRecordMode(Isolate* isolate,
+                                                      Local<Context> context,
+                                                      Local<Object> object) {
+  Local<Value> value = GetValue(isolate, context, object, kRecordModeParam);
+  if (value->IsString()) {
+    Local<String> v8_string = value->ToString(context).ToLocalChecked();
+    String::Utf8Value str(v8_string);
+    if (strcmp(kRecordUntilFull, *str) == 0) {
+      return TraceRecordMode::RECORD_UNTIL_FULL;
+    } else if (strcmp(kRecordContinuously, *str) == 0) {
+      return TraceRecordMode::RECORD_CONTINUOUSLY;
+    } else if (strcmp(kRecordAsMuchAsPossible, *str) == 0) {
+      return TraceRecordMode::RECORD_AS_MUCH_AS_POSSIBLE;
+    }
+  }
+  return TraceRecordMode::RECORD_UNTIL_FULL;
+}
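A hypothetical driver showing how the parser is meant to be fed — a JSON string in, a populated `TraceConfig` out. It assumes a live `Isolate`, as in `Agent::Start()`; the function name is illustrative:

```cpp
#include "libplatform/v8-tracing.h"
#include "tracing/trace_config_parser.h"

using v8::platform::tracing::TraceConfig;

void ConfigureTracing(v8::Isolate* isolate) {
  const char* json =
      "{\"record_mode\": \"record-continuously\","
      " \"included_categories\": [\"v8\", \"node\"]}";
  TraceConfig* trace_config = new TraceConfig();
  node::tracing::TraceConfigParser::FillTraceConfig(isolate, trace_config,
                                                    json);
  // trace_config would then be handed to TracingController::StartTracing(),
  // which takes ownership (as in Agent::Start()).
}
```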
+Local<Value> TraceConfigParser::GetValue(Isolate* isolate,
+                                         Local<Context> context,
+                                         Local<Object> object,
+                                         const char* property) {
+  Local<String> v8_str =
+      String::NewFromUtf8(isolate, property, NewStringType::kNormal)
+          .ToLocalChecked();
+  return object->Get(context, v8_str).ToLocalChecked();
+}
+
+}  // namespace tracing
+}  // namespace node
diff --git a/src/tracing/trace_config_parser.h b/src/tracing/trace_config_parser.h
new file mode 100644
index 00000000000000..112e438deaf6ba
--- /dev/null
+++ b/src/tracing/trace_config_parser.h
@@ -0,0 +1,49 @@
+#ifndef SRC_TRACE_CONFIG_PARSER_H_
+#define SRC_TRACE_CONFIG_PARSER_H_
+
+#include "libplatform/v8-tracing.h"
+#include "v8.h"
+
+namespace node {
+namespace tracing {
+
+using v8::platform::tracing::TraceConfig;
+using v8::platform::tracing::TraceRecordMode;
+using v8::Array;
+using v8::Boolean;
+using v8::Context;
+using v8::HandleScope;
+using v8::Isolate;
+using v8::JSON;
+using v8::Local;
+using v8::NewStringType;
+using v8::Object;
+using v8::Value;
+using v8::String;
+
+class TraceConfigParser {
+ public:
+  static void FillTraceConfig(Isolate* isolate, TraceConfig* trace_config,
+                              const char* json_str);
+
+ private:
+  static bool GetBoolean(Isolate* isolate, Local<Context> context,
+                         Local<Object> object, const char* property);
+
+  static int UpdateCategoriesList(Isolate* isolate, Local<Context> context,
+                                  Local<Object> object, const char* property,
+                                  TraceConfig* trace_config);
+
+  static TraceRecordMode GetTraceRecordMode(Isolate* isolate,
+                                            Local<Context> context,
+                                            Local<Object> object);
+
+  static Local<Value> GetValue(Isolate* isolate, Local<Context> context,
+                               Local<Object> object, const char* property);
+};
+
+}  // namespace tracing
+}  // namespace node
+
+#endif  // SRC_TRACE_CONFIG_PARSER_H_
diff --git a/src/tracing/trace_event.cc b/src/tracing/trace_event.cc
new file mode 100644
index 00000000000000..b83cae6e05c10c
--- /dev/null
+++ b/src/tracing/trace_event.cc
@@ -0,0 +1,17 @@
+#include "tracing/trace_event.h"
+
+namespace node {
+namespace tracing {
+
+v8::Platform* platform_ = nullptr;
+
+void TraceEventHelper::SetCurrentPlatform(v8::Platform* platform) {
+  platform_ = platform;
+}
+
+v8::Platform* TraceEventHelper::GetCurrentPlatform() {
+  return platform_;
+}
+
+}  // namespace tracing
+}  // namespace node
diff --git a/src/tracing/trace_event.h b/src/tracing/trace_event.h
new file mode 100644
index 00000000000000..b086a350071681
--- /dev/null
+++ b/src/tracing/trace_event.h
@@ -0,0 +1,589 @@
+#ifndef SRC_TRACING_TRACE_EVENT_H_
+#define SRC_TRACING_TRACE_EVENT_H_
+
+#include <stdint.h>
+
+#include "trace_event_common.h"
+#include "v8-platform.h"
+
+// This header file defines implementation details of how the trace macros in
+// trace_event_common.h collect and store trace events. Anything not
+// implementation-specific should go in trace_event_common.h instead of here.
+
+// The pointer returned from GetCategoryGroupEnabled() points to a value with
+// zero or more of the following bits. Used in this class only. The
+// TRACE_EVENT macros should only use the value as a bool. These values must
+// be in sync with macro values in trace_log.h in Chromium.
+enum CategoryGroupEnabledFlags {
+  // Category group enabled for the recording mode.
+  kEnabledForRecording_CategoryGroupEnabledFlags = 1 << 0,
+  // Category group enabled by SetEventCallbackEnabled().
+  kEnabledForEventCallback_CategoryGroupEnabledFlags = 1 << 2,
+  // Category group enabled to export events to ETW.
+  kEnabledForETWExport_CategoryGroupEnabledFlags = 1 << 3,
+};
+
+// By default, const char* argument values are assumed to have long-lived
+// scope and will not be copied. Use this macro to force a const char* to be
+// copied.
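Hypothetical usage of the macro defined just below, assuming the `TRACE_EVENT1` macro from trace_event_common.h: a `std::string`'s buffer dies with the string, so its value must be copied into the trace.

```cpp
#include <string>

void HandleRequest(const std::string& url) {
  // The url argument is transient, so the tracing system must copy it.
  TRACE_EVENT1("node", "HandleRequest",
               "url", TRACE_STR_COPY(url.c_str()));
  // A string literal, by contrast, is long-lived and may be passed bare:
  // TRACE_EVENT1("node", "HandleRequest", "url", "https://example.org");
}
```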
+#define TRACE_STR_COPY(str) node::tracing::TraceStringWithCopy(str) + +// By default, uint64 ID argument values are not mangled with the Process ID in +// TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling. +#define TRACE_ID_MANGLE(id) node::tracing::TraceID::ForceMangle(id) + +// By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC +// macros. Use this macro to prevent Process ID mangling. +#define TRACE_ID_DONT_MANGLE(id) node::tracing::TraceID::DontMangle(id) + +// By default, trace IDs are eventually converted to a single 64-bit number. Use +// this macro to add a scope string. +#define TRACE_ID_WITH_SCOPE(scope, id) \ + node::tracing::TraceID::WithScope(scope, id) + +// Sets the current sample state to the given category and name (both must be +// constant strings). These states are intended for a sampling profiler. +// Implementation note: we store category and name together because we don't +// want the inconsistency/expense of storing two pointers. +// |thread_bucket| is [0..2] and is used to statically isolate samples in one +// thread from others. +#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \ + name) \ + node::tracing::TraceEventSamplingStateScope::Set( \ + category "\0" name) + +// Returns a current sampling state of the given bucket. +#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \ + node::tracing::TraceEventSamplingStateScope::Current() + +// Creates a scope of a sampling state of the given bucket. +// +// { // The sampling state is set within this scope. +// TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name"); +// ...; +// } +#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \ + name) \ + node::tracing::TraceEventSamplingStateScope \ + traceEventSamplingScope(category "\0" name); + + +#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \ + *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \ + (kEnabledForRecording_CategoryGroupEnabledFlags | \ + kEnabledForEventCallback_CategoryGroupEnabledFlags) + +// The following macro has no implementation, but it needs to exist since +// it gets called from scoped trace events. It cannot call UNIMPLEMENTED() +// since an empty implementation is a valid one. +#define INTERNAL_TRACE_MEMORY(category, name) + +//////////////////////////////////////////////////////////////////////////////// +// Implementation specific tracing API definitions. + +// Get a pointer to the enabled state of the given trace category. Only +// long-lived literal strings should be given as the category group. The +// returned pointer can be held permanently in a local static for example. If +// the unsigned char is non-zero, tracing is enabled. If tracing is enabled, +// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled +// between the load of the tracing state and the call to +// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out +// for best performance when tracing is disabled. +// const uint8_t* +// TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group) +#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \ + node::tracing::TraceEventHelper::GetCurrentPlatform() \ + ->GetCategoryGroupEnabled + +// Get the number of times traces have been recorded. This is used to implement +// the TRACE_EVENT_IS_NEW_TRACE facility. 
+// unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED() +#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED UNIMPLEMENTED() + +// Add a trace event to the platform tracing system. +// uint64_t TRACE_EVENT_API_ADD_TRACE_EVENT( +// char phase, +// const uint8_t* category_group_enabled, +// const char* name, +// const char* scope, +// uint64_t id, +// uint64_t bind_id, +// int num_args, +// const char** arg_names, +// const uint8_t* arg_types, +// const uint64_t* arg_values, +// unsigned int flags) +#define TRACE_EVENT_API_ADD_TRACE_EVENT \ + node::tracing::TraceEventHelper::GetCurrentPlatform()->AddTraceEvent + +// Set the duration field of a COMPLETE trace event. +// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION( +// const uint8_t* category_group_enabled, +// const char* name, +// uint64_t id) +#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \ + node::tracing::TraceEventHelper::GetCurrentPlatform() \ + ->UpdateTraceEventDuration + +// Defines atomic operations used internally by the tracing system. +#define TRACE_EVENT_API_ATOMIC_WORD intptr_t +#define TRACE_EVENT_API_ATOMIC_LOAD(var) (var) +#define TRACE_EVENT_API_ATOMIC_STORE(var, value) (var) = (value) + +// The thread buckets for the sampling profiler. +extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3]; + +#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \ + g_trace_state[thread_bucket] + +//////////////////////////////////////////////////////////////////////////////// + +// Implementation detail: trace event macros create temporary variables +// to keep instrumentation overhead low. These macros give each temporary +// variable a unique name based on the line number to prevent name collisions. +#define INTERNAL_TRACE_EVENT_UID3(a, b) trace_event_unique_##a##b +#define INTERNAL_TRACE_EVENT_UID2(a, b) INTERNAL_TRACE_EVENT_UID3(a, b) +#define INTERNAL_TRACE_EVENT_UID(name_prefix) \ + INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__) + +// Implementation detail: internal macro to create static category. +// No barriers are needed, because this code is designed to operate safely +// even when the unsigned char* points to garbage data (which may be the case +// on processors without cache coherency). +// TODO(fmeawad): This implementation contradicts that we can have a different +// configuration for each isolate, +// https://code.google.com/p/v8/issues/detail?id=4563 +#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \ + category_group, atomic, category_group_enabled) \ + category_group_enabled = \ + reinterpret_cast(TRACE_EVENT_API_ATOMIC_LOAD(atomic)); \ + if (!category_group_enabled) { \ + category_group_enabled = \ + TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \ + TRACE_EVENT_API_ATOMIC_STORE( \ + atomic, reinterpret_cast( \ + category_group_enabled)); \ + } + +#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \ + static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \ + const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \ + INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \ + category_group, INTERNAL_TRACE_EVENT_UID(atomic), \ + INTERNAL_TRACE_EVENT_UID(category_group_enabled)); + +// Implementation detail: internal macro to create static category and add +// event if the category is enabled. +#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) 
\ + do { \ + INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ + if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ + node::tracing::AddTraceEvent( \ + phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ + node::tracing::kGlobalScope, node::tracing::kNoId, \ + node::tracing::kNoId, flags, ##__VA_ARGS__); \ + } \ + } while (0) + +// Implementation detail: internal macro to create static category and add begin +// event if the category is enabled. Also adds the end event when the scope +// ends. +#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \ + INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ + node::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \ + if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ + uint64_t h = node::tracing::AddTraceEvent( \ + TRACE_EVENT_PHASE_COMPLETE, \ + INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ + node::tracing::kGlobalScope, node::tracing::kNoId, \ + node::tracing::kNoId, TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \ + INTERNAL_TRACE_EVENT_UID(tracer) \ + .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ + h); \ + } + +#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, \ + bind_id, flow_flags, ...) \ + INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ + node::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \ + if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ + unsigned int trace_event_flags = flow_flags; \ + node::tracing::TraceID trace_event_bind_id(bind_id, \ + &trace_event_flags); \ + uint64_t h = node::tracing::AddTraceEvent( \ + TRACE_EVENT_PHASE_COMPLETE, \ + INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ + node::tracing::kGlobalScope, node::tracing::kNoId, \ + trace_event_bind_id.raw_id(), trace_event_flags, ##__VA_ARGS__); \ + INTERNAL_TRACE_EVENT_UID(tracer) \ + .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ + h); \ + } + +// Implementation detail: internal macro to create static category and add +// event if the category is enabled. +#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \ + flags, ...) \ + do { \ + INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ + if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ + unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \ + node::tracing::TraceID trace_event_trace_id(id, \ + &trace_event_flags); \ + node::tracing::AddTraceEvent( \ + phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \ + trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \ + node::tracing::kNoId, trace_event_flags, ##__VA_ARGS__); \ + } \ + } while (0) + +// Adds a trace event with a given timestamp. Not Implemented. +#define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(phase, category_group, name, \ + timestamp, flags, ...) \ + UNIMPLEMENTED() + +// Adds a trace event with a given id and timestamp. Not Implemented. +#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP( \ + phase, category_group, name, id, timestamp, flags, ...) \ + UNIMPLEMENTED() + +// Adds a trace event with a given id, thread_id, and timestamp. Not +// Implemented. +#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + phase, category_group, name, id, thread_id, timestamp, flags, ...) \ + UNIMPLEMENTED() + +// Enter and leave a context based on the current scope. 
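Before moving on to contexts, here is roughly what the scoped-event machinery above produces. This is a simplified, hand-expanded sketch of a `TRACE_EVENT0("node", "DoWork")`-style call site (the real macros also test the recording-mode flag bits rather than the whole byte):

```cpp
void DoWork() {
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;  // per-call-site cache
  const uint8_t* enabled;                         // category-enabled pointer
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES("node", atomic,
                                                          enabled);
  node::tracing::ScopedTracer tracer;
  if (*enabled) {
    uint64_t h = node::tracing::AddTraceEvent(
        TRACE_EVENT_PHASE_COMPLETE, enabled, "DoWork",
        node::tracing::kGlobalScope, node::tracing::kNoId,
        node::tracing::kNoId, TRACE_EVENT_FLAG_NONE);
    tracer.Initialize(enabled, "DoWork", h);  // end/duration fires at scope exit
  }
}
```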
+#define INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context)  \
+  struct INTERNAL_TRACE_EVENT_UID(ScopedContext) {                          \
+   public:                                                                  \
+    INTERNAL_TRACE_EVENT_UID(ScopedContext)(uint64_t cid) : cid_(cid) {     \
+      TRACE_EVENT_ENTER_CONTEXT(category_group, name, cid_);                \
+    }                                                                       \
+    ~INTERNAL_TRACE_EVENT_UID(ScopedContext)() {                            \
+      TRACE_EVENT_LEAVE_CONTEXT(category_group, name, cid_);                \
+    }                                                                       \
+                                                                            \
+   private:                                                                 \
+    /* Local class friendly DISALLOW_COPY_AND_ASSIGN */                     \
+    INTERNAL_TRACE_EVENT_UID(ScopedContext)                                 \
+    (const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {}                     \
+    void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {}       \
+    uint64_t cid_;                                                          \
+  };                                                                        \
+  INTERNAL_TRACE_EVENT_UID(ScopedContext)                                   \
+  INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
+
+namespace node {
+namespace tracing {
+
+// Specify these values when the corresponding argument of AddTraceEvent is
+// not used.
+const int kZeroNumArgs = 0;
+const decltype(nullptr) kGlobalScope = nullptr;
+const uint64_t kNoId = 0;
+
+class TraceEventHelper {
+ public:
+  static void SetCurrentPlatform(v8::Platform* platform);
+  static v8::Platform* GetCurrentPlatform();
+};
+
+// TraceID encapsulates an ID that can either be an integer or a pointer.
+// Pointers are by default mangled with the Process ID so that they are
+// unlikely to collide when the same pointer is used on different processes.
+class TraceID {
+ public:
+  class WithScope {
+   public:
+    WithScope(const char* scope, uint64_t raw_id)
+        : scope_(scope), raw_id_(raw_id) {}
+    uint64_t raw_id() const { return raw_id_; }
+    const char* scope() const { return scope_; }
+
+   private:
+    const char* scope_ = nullptr;
+    uint64_t raw_id_;
+  };
+
+  class DontMangle {
+   public:
+    explicit DontMangle(const void* raw_id)
+        : raw_id_(static_cast<uint64_t>(
+              reinterpret_cast<uintptr_t>(raw_id))) {}
+    explicit DontMangle(uint64_t raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(uint16_t raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(int64_t raw_id)
+        : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    explicit DontMangle(int raw_id)
+        : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    explicit DontMangle(int16_t raw_id)
+        : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    explicit DontMangle(signed char raw_id)
+        : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    explicit DontMangle(WithScope scoped_id)
+        : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+    const char* scope() const { return scope_; }
+    uint64_t raw_id() const { return raw_id_; }
+
+   private:
+    const char* scope_ = nullptr;
+    uint64_t raw_id_;
+  };
+
+  class ForceMangle {
+   public:
+    explicit ForceMangle(uint64_t raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(uint16_t raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(int64_t raw_id)
+        : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    explicit ForceMangle(int raw_id)
+        : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    explicit ForceMangle(int16_t raw_id)
+        : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    explicit ForceMangle(signed char raw_id)
+        : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    uint64_t raw_id() const { return raw_id_; }
+
+   private:
+    uint64_t raw_id_;
+  };
+
+  TraceID(const void* raw_id, unsigned int* flags)
+      : raw_id_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(raw_id))) {
+    *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+  }
+  TraceID(ForceMangle raw_id, unsigned int* flags)
+      : raw_id_(raw_id.raw_id()) {
+    *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+  }
+  TraceID(DontMangle maybe_scoped_id, unsigned int* flags)
+      : scope_(maybe_scoped_id.scope()), raw_id_(maybe_scoped_id.raw_id()) {}
+  TraceID(uint64_t raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned int raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(uint16_t raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned char raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(int64_t raw_id, unsigned int* flags)
+      : raw_id_(static_cast<uint64_t>(raw_id)) {
+    (void)flags;
+  }
+  TraceID(int raw_id, unsigned int* flags)
+      : raw_id_(static_cast<uint64_t>(raw_id)) {
+    (void)flags;
+  }
+  TraceID(int16_t raw_id, unsigned int* flags)
+      : raw_id_(static_cast<uint64_t>(raw_id)) {
+    (void)flags;
+  }
+  TraceID(signed char raw_id, unsigned int* flags)
+      : raw_id_(static_cast<uint64_t>(raw_id)) {
+    (void)flags;
+  }
+  TraceID(WithScope scoped_id, unsigned int* flags)
+      : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+
+  uint64_t raw_id() const { return raw_id_; }
+  const char* scope() const { return scope_; }
+
+ private:
+  const char* scope_ = nullptr;
+  uint64_t raw_id_;
+};
+
+// Simple union to store various types as uint64_t.
+union TraceValueUnion {
+  bool as_bool;
+  uint64_t as_uint;
+  int64_t as_int;
+  double as_double;
+  const void* as_pointer;
+  const char* as_string;
+};
+
+// Simple container for const char* that should be copied instead of retained.
+class TraceStringWithCopy {
+ public:
+  explicit TraceStringWithCopy(const char* str) : str_(str) {}
+  operator const char*() const { return str_; }
+
+ private:
+  const char* str_;
+};
+
+// Define SetTraceValue for each allowed type. It stores the type and value
+// in the return arguments. This allows this API to avoid declaring any
+// structures so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, union_member,         \
+                                         value_type_id)                     \
+  static inline void SetTraceValue(actual_type arg, unsigned char* type,    \
+                                   uint64_t* value) {                       \
+    TraceValueUnion type_value;                                             \
+    type_value.union_member = arg;                                          \
+    *type = value_type_id;                                                  \
+    *value = type_value.as_uint;                                            \
+  }
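A sketch of what `SetTraceValue` does for a `double`: the bits are moved into a `uint64_t` through the union, and the accompanying type tag tells readers how to decode the payload later.

```cpp
#include <cassert>
#include <cstdint>

// Stand-in for the TraceValueUnion above, trimmed to two members.
union TraceValueUnionSketch {
  uint64_t as_uint;
  double as_double;
};

int main() {
  TraceValueUnionSketch u;
  u.as_double = 1.5;
  uint64_t stored = u.as_uint;  // opaque 64-bit payload; the real API also
                                // records TRACE_VALUE_TYPE_DOUBLE alongside

  TraceValueUnionSketch back;
  back.as_uint = stored;
  assert(back.as_double == 1.5);  // decoded using the recorded type tag
}
```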
+// Simpler form for integer types that can be safely cast.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, value_type_id)    \
+  static inline void SetTraceValue(actual_type arg, unsigned char* type,    \
+                                   uint64_t* value) {                       \
+    *type = value_type_id;                                                  \
+    *value = static_cast<uint64_t>(arg);                                    \
+  }
+
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint16_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int64_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int16_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL)
+INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer,
+                                 TRACE_VALUE_TYPE_POINTER)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string,
+                                 TRACE_VALUE_TYPE_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
+                                 TRACE_VALUE_TYPE_COPY_STRING)
+
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
+
+// These AddTraceEvent template functions are defined here instead of in the
+// macro because the arg_values could be temporary objects, such as
+// std::string. In order to store pointers to the internal c_str and pass
+// them through to the tracing API, the arg_values must live throughout
+// these procedures.
+
+static inline uint64_t AddTraceEvent(char phase,
+                                     const uint8_t* category_group_enabled,
+                                     const char* name, const char* scope,
+                                     uint64_t id, uint64_t bind_id,
+                                     unsigned int flags) {
+  return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
+                                         scope, id, bind_id, kZeroNumArgs,
+                                         NULL, NULL, NULL, flags);
+}
+
+template <class ARG1_TYPE>
+static inline uint64_t AddTraceEvent(
+    char phase, const uint8_t* category_group_enabled, const char* name,
+    const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
+    const char* arg1_name, const ARG1_TYPE& arg1_val) {
+  const int num_args = 1;
+  unsigned char arg_types[1];
+  uint64_t arg_values[1];
+  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+  return TRACE_EVENT_API_ADD_TRACE_EVENT(
+      phase, category_group_enabled, name, scope, id, bind_id, num_args,
+      &arg1_name, arg_types, arg_values, flags);
+}
+
+template <class ARG1_TYPE, class ARG2_TYPE>
+static inline uint64_t AddTraceEvent(
+    char phase, const uint8_t* category_group_enabled, const char* name,
+    const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
+    const char* arg1_name, const ARG1_TYPE& arg1_val, const char* arg2_name,
+    const ARG2_TYPE& arg2_val) {
+  const int num_args = 2;
+  const char* arg_names[2] = {arg1_name, arg2_name};
+  unsigned char arg_types[2];
+  uint64_t arg_values[2];
+  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+  SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+  return TRACE_EVENT_API_ADD_TRACE_EVENT(
+      phase, category_group_enabled, name, scope, id, bind_id, num_args,
+      arg_names, arg_types, arg_values, flags);
+}
+
+// Used by TRACE_EVENTx macros. Do not use directly.
+class ScopedTracer {
+ public:
+  // Note: members of data_ are intentionally left uninitialized. See
+  // Initialize.
+  ScopedTracer() : p_data_(NULL) {}
+
+  ~ScopedTracer() {
+    if (p_data_ && *data_.category_group_enabled)
+      TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+          data_.category_group_enabled, data_.name, data_.event_handle);
+  }
+
+  void Initialize(const uint8_t* category_group_enabled, const char* name,
+                  uint64_t event_handle) {
+    data_.category_group_enabled = category_group_enabled;
+    data_.name = name;
+    data_.event_handle = event_handle;
+    p_data_ = &data_;
+  }
+
+ private:
+  // This Data struct workaround is to avoid initializing all the members
+  // in Data during construction of this object, since this object is always
+  // constructed, even when tracing is disabled. If the members of Data were
+  // members of this class instead, compiler warnings would occur about
+  // potential uninitialized accesses.
+  struct Data {
+    const uint8_t* category_group_enabled;
+    const char* name;
+    uint64_t event_handle;
+  };
+  Data* p_data_;
+  Data data_;
+};
+
+// Used by the TRACE_EVENT_BINARY_EFFICIENTx macros. Do not use directly.
+class ScopedTraceBinaryEfficient {
+ public:
+  ScopedTraceBinaryEfficient(const char* category_group, const char* name);
+  ~ScopedTraceBinaryEfficient();
+
+ private:
+  const uint8_t* category_group_enabled_;
+  const char* name_;
+  uint64_t event_handle_;
+};
+
+// TraceEventSamplingStateScope records the current sampling state and sets
+// a new sampling state. When the scope exits, it restores the previously
+// recorded sampling state.
+template <size_t BucketNumber>
+class TraceEventSamplingStateScope {
+ public:
+  explicit TraceEventSamplingStateScope(const char* category_and_name) {
+    previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
+    TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
+  }
+
+  ~TraceEventSamplingStateScope() {
+    TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
+  }
+
+  static inline const char* Current() {
+    return reinterpret_cast<const char*>(
+        TRACE_EVENT_API_ATOMIC_LOAD(g_trace_state[BucketNumber]));
+  }
+
+  static inline void Set(const char* category_and_name) {
+    TRACE_EVENT_API_ATOMIC_STORE(g_trace_state[BucketNumber],
+                                 reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
+                                     const_cast<char*>(category_and_name)));
+  }
+
+ private:
+  const char* previous_state_;
+};
+
+}  // namespace tracing
+}  // namespace node
+
+#endif  // SRC_TRACING_TRACE_EVENT_H_

From e6c403eb6c8af23e7777f0a5393c94ddf5c8557e Mon Sep 17 00:00:00 2001
From: Kelvin Jin
Date: Tue, 6 Sep 2016 11:36:12 -0700
Subject: [PATCH 02/22] Thread and shutdown safety for NodeTraceWriter (#3)

* Thread and shutdown safety for NodeTraceWriter

All concurrency in the tracing agent has been moved to NodeTraceWriter.
Cleanup for the tracing thread happens in its destructor.
* InternalTraceBuffer additional mutex in Flush() * Addressing review comments for 6e058e7 * Switch to using node's synchronization classes * Make serialization of flush events occur on trace thread --- src/tracing/agent.cc | 52 +++--------- src/tracing/agent.h | 11 +-- src/tracing/node_trace_buffer.cc | 89 ++++++++++++++------ src/tracing/node_trace_buffer.h | 27 +++--- src/tracing/node_trace_writer.cc | 139 ++++++++++++++++++++++++------- src/tracing/node_trace_writer.h | 31 +++++-- 6 files changed, 228 insertions(+), 121 deletions(-) diff --git a/src/tracing/agent.cc b/src/tracing/agent.cc index cb9d419ba8aa9d..aa0b5b1a045933 100644 --- a/src/tracing/agent.cc +++ b/src/tracing/agent.cc @@ -13,17 +13,16 @@ void Agent::Start(v8::Platform* platform, const char* trace_config_file) { auto env = parent_env_; platform_ = platform; - int err = uv_loop_init(&child_loop_); + int err = uv_loop_init(&tracing_loop_); CHECK_EQ(err, 0); - NodeTraceWriter* trace_writer = new NodeTraceWriter(&child_loop_); - TraceBuffer* trace_buffer = new NodeTraceBuffer( - NodeTraceBuffer::kBufferChunks, trace_writer, this); - - flush_signal_.data = trace_buffer; - err = uv_async_init(&child_loop_, &flush_signal_, FlushSignalCb); + err = uv_thread_create(&thread_, ThreadCb, this); CHECK_EQ(err, 0); + NodeTraceWriter* trace_writer = new NodeTraceWriter(&tracing_loop_); + TraceBuffer* trace_buffer = new NodeTraceBuffer( + NodeTraceBuffer::kBufferChunks, trace_writer, &tracing_loop_); + tracing_controller_ = new TracingController(); TraceConfig* trace_config = new TraceConfig(); @@ -40,9 +39,6 @@ void Agent::Start(v8::Platform* platform, const char* trace_config_file) { tracing_controller_->Initialize(trace_buffer); tracing_controller_->StartTracing(trace_config); v8::platform::SetTracingController(platform, tracing_controller_); - - err = uv_thread_create(&thread_, ThreadCb, this); - CHECK_EQ(err, 0); } void Agent::Stop() { @@ -51,43 +47,17 @@ void Agent::Stop() { } // Perform final Flush on TraceBuffer. We don't want the tracing controller // to flush the buffer again on destruction of the V8::Platform. - - // TODO: Note that the serialization of this Flush is done on the main thread - // and passed to libuv file write on the child thread later. - // We might want to perform the serialization on the child thread instead, - // but the current TracingController does not allow override of StopTracing(). - // By allowing serialization to be performed on the main thread, - // NodeTraceWriter::IsReady() is now accessed from multiple threads, and - // there might be race conditions if the child thread is still processing - // something at the moment. - // This can also be resolved by ensuring the child thread is free before - // stopping tracing. tracing_controller_->StopTracing(); delete tracing_controller_; + // Thread should finish when the tracing loop is stopped. + uv_thread_join(&thread_); v8::platform::SetTracingController(platform_, nullptr); - // TODO: We might need to cleanup loop properly upon exit. - // 1. Close child_loop_. - // 2. Close flush_signal_. - // 3. Destroy thread_. 
} // static -void Agent::ThreadCb(void* agent) { - static_cast(agent)->WorkerRun(); -} - -void Agent::WorkerRun() { - uv_run(&child_loop_, UV_RUN_DEFAULT); -} - -// static -void Agent::FlushSignalCb(uv_async_t* signal) { - static_cast(signal->data)->Flush(); -} - -void Agent::SendFlushSignal() { - int err = uv_async_send(&flush_signal_); - CHECK_EQ(err, 0); +void Agent::ThreadCb(void* arg) { + Agent* agent = static_cast(arg); + uv_run(&agent->tracing_loop_, UV_RUN_DEFAULT); } } // namespace tracing diff --git a/src/tracing/agent.h b/src/tracing/agent.h index ba96b25734f8cd..34418cb93f7fa9 100644 --- a/src/tracing/agent.h +++ b/src/tracing/agent.h @@ -19,20 +19,15 @@ class Agent { explicit Agent(Environment* env); void Start(v8::Platform* platform, const char* trace_config_file); void Stop(); - void SendFlushSignal(); private: - static void ThreadCb(void* agent); - static void FlushSignalCb(uv_async_t* signal); - bool IsStarted() { return platform_ != nullptr; } - void WorkerRun(); + static void ThreadCb(void* arg); + uv_thread_t thread_; + uv_loop_t tracing_loop_; v8::Platform* platform_ = nullptr; Environment* parent_env_; - uv_thread_t thread_; - uv_loop_t child_loop_; - uv_async_t flush_signal_; TracingController* tracing_controller_; }; diff --git a/src/tracing/node_trace_buffer.cc b/src/tracing/node_trace_buffer.cc index 8a2e1d4691447d..01d33e7f1b1365 100644 --- a/src/tracing/node_trace_buffer.cc +++ b/src/tracing/node_trace_buffer.cc @@ -1,15 +1,13 @@ #include "tracing/node_trace_buffer.h" -#include "tracing/agent.h" - namespace node { namespace tracing { const double InternalTraceBuffer::kFlushThreshold = 0.75; InternalTraceBuffer::InternalTraceBuffer(size_t max_chunks, - NodeTraceWriter* trace_writer, Agent* agent) - : max_chunks_(max_chunks), trace_writer_(trace_writer), agent_(agent) { + NodeTraceWriter* trace_writer, NodeTraceBuffer* external_buffer) + : max_chunks_(max_chunks), trace_writer_(trace_writer), external_buffer_(external_buffer) { chunks_.resize(max_chunks); } @@ -18,9 +16,9 @@ TraceObject* InternalTraceBuffer::AddTraceEvent(uint64_t* handle) { // If the buffer usage exceeds kFlushThreshold, attempt to perform a flush. // This number should be customizable. if (total_chunks_ >= max_chunks_ * kFlushThreshold) { - // Tell tracing agent thread to perform a Flush on this buffer. - // Note that the signal might be ignored if the writer is busy now. - agent_->SendFlushSignal(); + mutex_.Unlock(); + external_buffer_->Flush(false); + mutex_.Lock(); } // Create new chunk if last chunk is full or there is no chunk. 
if (total_chunks_ == 0 || chunks_[total_chunks_ - 1]->IsFull()) { @@ -58,15 +56,18 @@ TraceObject* InternalTraceBuffer::GetEventByHandle(uint64_t handle) { return chunk->GetEventAt(event_index); } -void InternalTraceBuffer::Flush() { - for (size_t i = 0; i < total_chunks_; ++i) { - auto& chunk = chunks_[i]; - for (size_t j = 0; j < chunk->size(); ++j) { - trace_writer_->AppendTraceEvent(chunk->GetEventAt(j)); +void InternalTraceBuffer::Flush(bool blocking) { + { + Mutex::ScopedLock scoped_lock(mutex_); + for (size_t i = 0; i < total_chunks_; ++i) { + auto& chunk = chunks_[i]; + for (size_t j = 0; j < chunk->size(); ++j) { + trace_writer_->AppendTraceEvent(chunk->GetEventAt(j)); + } } + total_chunks_ = 0; } - trace_writer_->Flush(); - total_chunks_ = 0; + trace_writer_->Flush(blocking); } uint64_t InternalTraceBuffer::MakeHandle( @@ -85,11 +86,23 @@ void InternalTraceBuffer::ExtractHandle( } NodeTraceBuffer::NodeTraceBuffer(size_t max_chunks, - NodeTraceWriter* trace_writer, Agent* agent) - : trace_writer_(trace_writer), - buffer1_(max_chunks, trace_writer, agent), - buffer2_(max_chunks, trace_writer, agent) { + NodeTraceWriter* trace_writer, uv_loop_t* tracing_loop) + : trace_writer_(trace_writer), tracing_loop_(tracing_loop), + buffer1_(max_chunks, trace_writer, this), + buffer2_(max_chunks, trace_writer, this) { current_buf_.store(&buffer1_); + + flush_signal_.data = this; + int err = uv_async_init(tracing_loop_, &flush_signal_, NonBlockingFlushSignalCb); + CHECK_EQ(err, 0); + + exit_signal_.data = this; + err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb); + CHECK_EQ(err, 0); +} + +NodeTraceBuffer::~NodeTraceBuffer() { + uv_async_send(&exit_signal_); } TraceObject* NodeTraceBuffer::AddTraceEvent(uint64_t* handle) { @@ -100,15 +113,16 @@ TraceObject* NodeTraceBuffer::GetEventByHandle(uint64_t handle) { return current_buf_.load()->GetEventByHandle(handle); } +// TODO: This function is here to match the parent class signature, +// but it's not expressive enough because it doesn't allow us to specify +// whether it should block or not. The parent class signature should be changed +// in the future to include this parameter, and when that's done, this +// function should be removed. bool NodeTraceBuffer::Flush() { - // This function should mainly be called from the tracing agent thread. - // TODO: However, it could be called from the main thread, when - // the tracing controller stops tracing. - // Ideally we would want all Flushes to be done on the tracing thread, to - // prevent concurrent access of trace_writer_. - if (!trace_writer_->IsReady()) { - return false; - } + return Flush(true); +} + +void NodeTraceBuffer::FlushPrivate(bool blocking) { InternalTraceBuffer* prev_buf = current_buf_.load(); if (prev_buf == &buffer1_) { current_buf_.store(&buffer2_); @@ -117,9 +131,30 @@ bool NodeTraceBuffer::Flush() { } // Flush the other buffer. // Note that concurrently, we can AddTraceEvent to the current buffer. 
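The double-buffering arrangement being reworked below is worth seeing in miniature: writers append to the current buffer while the flusher drains the other one, and only the swap itself is atomic. A simplified sketch (in the real code each `InternalTraceBuffer` additionally guards its contents with a mutex, so `Add` is thread-safe):

```cpp
#include <atomic>
#include <vector>

struct Buf { std::vector<int> events; };

std::atomic<Buf*> current_buf;
Buf buffer1, buffer2;

void Add(int e) { current_buf.load()->events.push_back(e); }

void FlushOther() {
  Buf* prev = current_buf.load();
  current_buf.store(prev == &buffer1 ? &buffer2 : &buffer1);  // swap
  // prev can now be drained while new events land in the other buffer.
  prev->events.clear();
}
```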
- prev_buf->Flush(); + prev_buf->Flush(blocking); +} + +// static +void NodeTraceBuffer::NonBlockingFlushSignalCb(uv_async_t* signal) { + NodeTraceBuffer* buffer = reinterpret_cast(signal->data); + buffer->FlushPrivate(false); +} + +bool NodeTraceBuffer::Flush(bool blocking) { + if (blocking) { + FlushPrivate(true); + } else { + uv_async_send(&flush_signal_); + } return true; } +// static +void NodeTraceBuffer::ExitSignalCb(uv_async_t* signal) { + NodeTraceBuffer* buffer = reinterpret_cast(signal->data); + uv_close(reinterpret_cast(&buffer->flush_signal_), nullptr); + uv_close(reinterpret_cast(&buffer->exit_signal_), nullptr); +} + } // namespace tracing } // namespace node diff --git a/src/tracing/node_trace_buffer.h b/src/tracing/node_trace_buffer.h index 97678070333151..a429b90314d5c0 100644 --- a/src/tracing/node_trace_buffer.h +++ b/src/tracing/node_trace_buffer.h @@ -7,13 +7,6 @@ #include -// Forward declaration to break recursive dependency chain with tracing/agent.h. -namespace node { -namespace tracing { - class Agent; -} // namespace tracing -} // namespace node - namespace node { namespace tracing { @@ -21,14 +14,17 @@ using v8::platform::tracing::TraceBuffer; using v8::platform::tracing::TraceBufferChunk; using v8::platform::tracing::TraceObject; +// forward declaration +class NodeTraceBuffer; + class InternalTraceBuffer { public: InternalTraceBuffer(size_t max_chunks, NodeTraceWriter* trace_writer, - Agent* agent); + NodeTraceBuffer* external_buffer); TraceObject* AddTraceEvent(uint64_t* handle); TraceObject* GetEventByHandle(uint64_t handle); - void Flush(); + void Flush(bool blocking); static const double kFlushThreshold; @@ -42,7 +38,7 @@ class InternalTraceBuffer { Mutex mutex_; size_t max_chunks_; NodeTraceWriter* trace_writer_; - Agent* agent_; + NodeTraceBuffer* external_buffer_; std::vector> chunks_; size_t total_chunks_ = 0; uint32_t current_chunk_seq_ = 1; @@ -51,15 +47,24 @@ class InternalTraceBuffer { class NodeTraceBuffer : public TraceBuffer { public: NodeTraceBuffer(size_t max_chunks, NodeTraceWriter* trace_writer, - Agent* agent); + uv_loop_t* tracing_loop); + ~NodeTraceBuffer(); TraceObject* AddTraceEvent(uint64_t* handle) override; TraceObject* GetEventByHandle(uint64_t handle) override; bool Flush() override; + bool Flush(bool blocking); static const size_t kBufferChunks = 1024; private: + void FlushPrivate(bool blocking); + static void NonBlockingFlushSignalCb(uv_async_t* signal); + static void ExitSignalCb(uv_async_t* signal); + + uv_loop_t* tracing_loop_; + uv_async_t flush_signal_; + uv_async_t exit_signal_; std::unique_ptr trace_writer_; // TODO: Change std::atomic to something less contentious. std::atomic current_buf_; diff --git a/src/tracing/node_trace_writer.cc b/src/tracing/node_trace_writer.cc index 0bc61ad86e610c..6c99c3af9ccb9b 100644 --- a/src/tracing/node_trace_writer.cc +++ b/src/tracing/node_trace_writer.cc @@ -10,48 +10,64 @@ namespace tracing { NodeTraceWriter::NodeTraceWriter(uv_loop_t* tracing_loop) : tracing_loop_(tracing_loop) { - write_req_.writer = this; + flush_signal_.data = this; + int err = uv_async_init(tracing_loop_, &flush_signal_, FlushSignalCb); + CHECK_EQ(err, 0); + + exit_signal_.data = this; + err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb); + CHECK_EQ(err, 0); } void NodeTraceWriter::WriteSuffix() { // If our final log file has traces, then end the file appropriately. // This means that if no trace events are recorded, then no trace file is // produced. 
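// A self-contained sketch (not part of the patch) of the libuv wake-up
// pattern that NodeTraceBuffer and NodeTraceWriter rely on: uv_async
// handles are initialized on the tracing loop, and uv_async_send() may
// then be called from any thread. Names below are illustrative.
#include <uv.h>

#include <cstdio>

static void OnFlushSignal(uv_async_t* handle) {
  // Runs on the thread that is driving the loop (cf. Agent::ThreadCb).
  std::printf("flush requested: %s\n",
              static_cast<const char*>(handle->data));
  // Closing the handle lets uv_run() return once nothing else is pending.
  uv_close(reinterpret_cast<uv_handle_t*>(handle), nullptr);
}

static void LoopThread(void* arg) {
  uv_run(static_cast<uv_loop_t*>(arg), UV_RUN_DEFAULT);
}

int main() {
  uv_loop_t loop;
  uv_loop_init(&loop);

  uv_async_t flush_signal;
  flush_signal.data = const_cast<char*>("trace buffer nearly full");
  // As in Agent::Start, the handle must exist before the thread starts;
  // otherwise the loop could shut down prematurely.
  uv_async_init(&loop, &flush_signal, OnFlushSignal);

  uv_thread_t thread;
  uv_thread_create(&thread, LoopThread, &loop);
  uv_async_send(&flush_signal);  // safe from any thread
  uv_thread_join(&thread);
  uv_loop_close(&loop);
  return 0;
}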
- if (total_traces_ > 0) { - WriteToFile("]}\n"); + bool should_flush = false; + { + Mutex::ScopedLock scoped_lock(stream_mutex_); + if (total_traces_ > 0) { + total_traces_ = 0; // so we don't write it again in FlushPrivate + stream_ << "]}\n"; + should_flush = true; + } + } + if (should_flush) { + Flush(true); } } NodeTraceWriter::~NodeTraceWriter() { WriteSuffix(); uv_fs_t req; - int err = uv_fs_close(tracing_loop_, &req, fd_, nullptr); - CHECK_EQ(err, 0); - uv_fs_req_cleanup(&req); - uv_close(reinterpret_cast(&trace_file_pipe_), nullptr); + int err; + if (fd_ != -1) { + err = uv_fs_close(tracing_loop_, &req, fd_, nullptr); + CHECK_EQ(err, 0); + uv_fs_req_cleanup(&req); + uv_close(reinterpret_cast(&trace_file_pipe_), nullptr); + } + uv_async_send(&exit_signal_); } void NodeTraceWriter::OpenNewFileForStreaming() { ++file_num_; uv_fs_t req; - std::string log_file = "node_trace.log." + std::to_string(file_num_); + std::string log_file = "node_trace." + std::to_string(file_num_) + ".log"; fd_ = uv_fs_open(tracing_loop_, &req, log_file.c_str(), O_CREAT | O_WRONLY | O_TRUNC, 0644, NULL); + CHECK_NE(fd_, -1); uv_fs_req_cleanup(&req); uv_pipe_init(tracing_loop_, &trace_file_pipe_, 0); uv_pipe_open(&trace_file_pipe_, fd_); - // Note that the following does not get flushed to file immediately. - stream_ << "{\"traceEvents\":["; } void NodeTraceWriter::AppendTraceEvent(TraceObject* trace_event) { - // Set a writing_ flag here so that we only do a single Flush at any one - // point in time. This is because we need the stream_ to be alive when we - // are flushing. - is_writing_ = true; + Mutex::ScopedLock scoped_lock(stream_mutex_); // If this is the first trace event, open a new file for streaming. if (total_traces_ == 0) { OpenNewFileForStreaming(); + stream_ << "{\"traceEvents\":["; } else { stream_ << ",\n"; } @@ -75,29 +91,96 @@ void NodeTraceWriter::AppendTraceEvent(TraceObject* trace_event) { stream_ << "}"; } +void NodeTraceWriter::FlushPrivate() { + std::string str; + int highest_request_id; + bool should_write = false; + { + Mutex::ScopedLock stream_scoped_lock(stream_mutex_); + if (total_traces_ >= kTracesPerFile) { + total_traces_ = 0; + stream_ << "]}\n"; + } + // str() makes a copy of the contents of the stream. + str = stream_.str(); + stream_.str(""); + stream_.clear(); + if (str.length() > 0 && fd_ != -1) { + Mutex::ScopedLock request_scoped_lock(request_mutex_); + highest_request_id = num_write_requests_++; + should_write = true; + } + } + if (should_write) { + WriteToFile(str, highest_request_id); + } +} + +void NodeTraceWriter::FlushSignalCb(uv_async_t* signal) { + NodeTraceWriter* trace_writer = static_cast(signal->data); + trace_writer->FlushPrivate(); +} + +// TODO: Remove (is it necessary to change the API? Since because of WriteSuffix +// it no longer matters whether it's true or false) void NodeTraceWriter::Flush() { - if (total_traces_ >= kTracesPerFile) { - total_traces_ = 0; - stream_ << "]}\n"; + Flush(true); +} + +void NodeTraceWriter::Flush(bool blocking) { + int err = uv_async_send(&flush_signal_); + CHECK_EQ(err, 0); + Mutex::ScopedLock scoped_lock(request_mutex_); + int request_id = num_write_requests_++; + if (blocking) { + // Wait until data associated with this request id has been written to disk. + // This guarantees that data from all earlier requests have also been + // written. 
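// The wait loop below is a completion-counter handshake: each flush takes a
// fresh request id, WriteCb publishes the highest id written so far, and a
// blocking Flush() sleeps until its own id has been reached. A standalone
// sketch of the same protocol, using std::mutex/std::condition_variable as
// stand-ins for the node_mutex.h wrappers used in this file:
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

struct FlushTracker {
  std::mutex m;
  std::condition_variable cv;
  int num_requests = 0;
  int highest_completed = 0;

  int BeginFlush() {  // analogous to num_write_requests_++
    std::lock_guard<std::mutex> lock(m);
    return ++num_requests;
  }
  void CompleteUpTo(int id) {  // analogous to WriteCb
    std::lock_guard<std::mutex> lock(m);
    highest_completed = id;
    cv.notify_all();
  }
  void WaitFor(int id) {  // analogous to the wait loop below
    std::unique_lock<std::mutex> lock(m);
    cv.wait(lock, [&] { return highest_completed >= id; });
  }
};

int main() {
  FlushTracker tracker;
  int id = tracker.BeginFlush();
  std::thread writer([&] { tracker.CompleteUpTo(id); });
  tracker.WaitFor(id);  // returns once CompleteUpTo(id) has run
  writer.join();
  std::printf("flush %d completed\n", id);
  return 0;
}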
+    while (request_id > highest_request_id_completed_) {
+      request_cond_.Wait(scoped_lock);
+    }
   }
-  WriteToFile(stream_.str().c_str());
 }

-void NodeTraceWriter::WriteToFile(const char* str) {
-  uv_buf_t uv_buf = uv_buf_init(const_cast<char*>(str), strlen(str));
-  // We can reuse write_req_ here because we are assured that no other
-  // uv_write using write_req_ is ongoing. In the case of synchronous writes
-  // where the callback has not been fired, it is unclear whether it is OK
-  // to reuse write_req_.
-  uv_write(reinterpret_cast<uv_write_t*>(&write_req_),
+void NodeTraceWriter::WriteToFile(std::string str, int highest_request_id) {
+  const char* c_str = str.c_str();
+  uv_buf_t uv_buf = uv_buf_init(const_cast<char*>(c_str), strlen(c_str));
+  WriteRequest* write_req = new WriteRequest();
+  write_req->writer = this;
+  write_req->highest_request_id = highest_request_id;
+  request_mutex_.Lock();
+  // Manage a queue of WriteRequest objects because the behavior of uv_write
+  // is undefined if the same WriteRequest object is used more than once
+  // between WriteCb calls. In addition, this allows us to keep track of the
+  // id of the latest write request that has actually been completed.
+  write_req_queue_.push(write_req);
+  request_mutex_.Unlock();
+  // TODO: Is the return value of back() guaranteed to always have the
+  // same address?
+  uv_write(reinterpret_cast<uv_write_t*>(write_req),
            reinterpret_cast<uv_stream_t*>(&trace_file_pipe_),
            &uv_buf, 1, WriteCb);
 }

 void NodeTraceWriter::WriteCb(uv_write_t* req, int status) {
-  NodeTraceWriter* writer = reinterpret_cast<WriteRequest*>(req)->writer;
-  writer->stream_.str("");
-  writer->is_writing_ = false;
+  WriteRequest* write_req = reinterpret_cast<WriteRequest*>(req);
+  NodeTraceWriter* writer = write_req->writer;
+  int highest_request_id = write_req->highest_request_id;
+  {
+    Mutex::ScopedLock scoped_lock(writer->request_mutex_);
+    CHECK_EQ(write_req, writer->write_req_queue_.front());
+    writer->write_req_queue_.pop();
+    writer->highest_request_id_completed_ = highest_request_id;
+    writer->request_cond_.Broadcast(scoped_lock);
+  }
+  delete write_req;
+}
+
+// static
+void NodeTraceWriter::ExitSignalCb(uv_async_t* signal) {
+  NodeTraceWriter* trace_writer = static_cast<NodeTraceWriter*>(signal->data);
+  uv_close(reinterpret_cast<uv_handle_t*>(&trace_writer->flush_signal_), nullptr);
+  uv_close(reinterpret_cast<uv_handle_t*>(&trace_writer->exit_signal_), nullptr);
 }

 }  // namespace tracing
diff --git a/src/tracing/node_trace_writer.h b/src/tracing/node_trace_writer.h
index 30e12112bbc795..e45d85ad409f34 100644
--- a/src/tracing/node_trace_writer.h
+++ b/src/tracing/node_trace_writer.h
@@ -2,7 +2,9 @@
 #define SRC_NODE_TRACE_WRITER_H_

 #include <sstream>
+#include <queue>

+#include "node_mutex.h"
 #include "libplatform/v8-tracing.h"
 #include "uv.h"

@@ -18,9 +20,9 @@ class NodeTraceWriter : public TraceWriter {
   NodeTraceWriter(uv_loop_t* tracing_loop);
   ~NodeTraceWriter();

-  bool IsReady() { return !is_writing_; }
   void AppendTraceEvent(TraceObject* trace_event) override;
   void Flush() override;
+  void Flush(bool blocking);

   static const int kTracesPerFile = 1 << 20;

@@ -28,21 +30,38 @@ class NodeTraceWriter : public TraceWriter {
   struct WriteRequest {
     uv_write_t req;
     NodeTraceWriter* writer;
+    int highest_request_id;
   };

   static void WriteCb(uv_write_t* req, int status);
   void OpenNewFileForStreaming();
-  void WriteToFile(const char* str);
+  void WriteToFile(std::string str, int highest_request_id);
   void WriteSuffix();
+  static void FlushSignalCb(uv_async_t* signal);
+  void FlushPrivate();
+  static void ExitSignalCb(uv_async_t* signal);

   uv_loop_t* tracing_loop_;
-  int fd_;
+  // Triggers callback to initiate writing the contents of stream_ to disk.
+  uv_async_t flush_signal_;
+  // Triggers callback to close async objects, ending the tracing thread.
+  uv_async_t exit_signal_;
+  // Prevents concurrent R/W on state related to serialized trace data
+  // before it's written to disk, namely stream_ and total_traces_.
+  Mutex stream_mutex_;
+  // Prevents concurrent R/W on state related to write requests.
+  Mutex request_mutex_;
+  // Allows blocking calls to Flush() to wait on a condition for
+  // trace events to be written to disk.
+  ConditionVariable request_cond_;
+  int fd_ = -1;
+  std::queue<WriteRequest*> write_req_queue_;
+  int num_write_requests_ = 0;
+  int highest_request_id_completed_ = 0;
   int total_traces_ = 0;
   int file_num_ = 0;
-  WriteRequest write_req_;
-  uv_pipe_t trace_file_pipe_;
-  bool is_writing_ = false;
   std::ostringstream stream_;
+  uv_pipe_t trace_file_pipe_;
 };

 }  // namespace tracing

From d76d9cf2896cc4aeece8d9eef582a8ee7a1af607 Mon Sep 17 00:00:00 2001
From: Kelvin Jin
Date: Fri, 9 Sep 2016 19:27:47 -0700
Subject: [PATCH 03/22] Use V8's internal tracing system for traces (#4)

* Use V8's internal tracing system for traces

* Comments on usage of JSONTraceWriter destructor
---
 src/tracing/node_trace_writer.cc | 35 ++++++++++++--------------------
 src/tracing/node_trace_writer.h  |  1 +
 2 files changed, 14 insertions(+), 22 deletions(-)

diff --git a/src/tracing/node_trace_writer.cc b/src/tracing/node_trace_writer.cc
index 6c99c3af9ccb9b..c50b43cc6af5ee 100644
--- a/src/tracing/node_trace_writer.cc
+++ b/src/tracing/node_trace_writer.cc
@@ -28,7 +28,8 @@ void NodeTraceWriter::WriteSuffix() {
     Mutex::ScopedLock scoped_lock(stream_mutex_);
     if (total_traces_ > 0) {
       total_traces_ = 0;  // so we don't write it again in FlushPrivate
-      stream_ << "]}\n";
+      // Appends "]}\n" to stream_.
+      delete json_trace_writer_;
       should_flush = true;
     }
   }
@@ -67,28 +68,16 @@ void NodeTraceWriter::AppendTraceEvent(TraceObject* trace_event) {
   // If this is the first trace event, open a new file for streaming.
   if (total_traces_ == 0) {
     OpenNewFileForStreaming();
-    stream_ << "{\"traceEvents\":[";
-  } else {
-    stream_ << ",\n";
+    // Constructing a new JSONTraceWriter object appends "{\"traceEvents\":["
+    // to stream_.
+    // In other words, the constructor initializes the serialization stream
+    // to a state where we can start writing trace events to it.
+    // Repeatedly constructing and destroying json_trace_writer_ allows
+    // us to use V8's JSON writer instead of implementing our own.
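// A standalone sketch of that lifecycle against the public v8-tracing API
// (assumes linking with V8's libplatform; the prologue/epilogue behavior
// follows the comments in this patch):
#include "libplatform/v8-tracing.h"

#include <iostream>
#include <sstream>

int main() {
  using v8::platform::tracing::TraceWriter;
  std::ostringstream stream;
  // Construction writes the JSON prologue {"traceEvents":[ into the stream.
  TraceWriter* writer = TraceWriter::CreateJSONTraceWriter(stream);
  // ... writer->AppendTraceEvent(trace_object) for each TraceObject* ...
  delete writer;  // destruction writes the epilogue, closing the document
  std::cout << stream.str() << std::endl;
  return 0;
}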
+    json_trace_writer_ = TraceWriter::CreateJSONTraceWriter(stream_);
   }
   ++total_traces_;
-  stream_ << "{\"pid\":" << trace_event->pid()
-          << ",\"tid\":" << trace_event->tid()
-          << ",\"ts\":" << trace_event->ts()
-          << ",\"tts\":" << trace_event->tts() << ",\"ph\":\""
-          << trace_event->phase() << "\",\"cat\":\""
-          << TracingController::GetCategoryGroupName(
-                 trace_event->category_enabled_flag())
-          << "\",\"name\":\"" << trace_event->name()
-          << "\",\"args\":{},\"dur\":" << trace_event->duration()
-          << ",\"tdur\":" << trace_event->cpu_duration();
-  if (trace_event->flags() & TRACE_EVENT_FLAG_HAS_ID) {
-    if (trace_event->scope() != nullptr) {
-      stream_ << ",\"scope\":\"" << trace_event->scope() << "\"";
-    }
-    stream_ << ",\"id\":" << trace_event->id();
-  }
-  stream_ << "}";
+  json_trace_writer_->AppendTraceEvent(trace_event);
 }

 void NodeTraceWriter::FlushPrivate() {
@@ -99,7 +88,9 @@
     Mutex::ScopedLock stream_scoped_lock(stream_mutex_);
     if (total_traces_ >= kTracesPerFile) {
       total_traces_ = 0;
-      stream_ << "]}\n";
+      // Destroying the member JSONTraceWriter object appends "]}\n" to
+      // stream_ - in other words, ending a JSON file.
+      delete json_trace_writer_;
     }
     // str() makes a copy of the contents of the stream.
     str = stream_.str();
diff --git a/src/tracing/node_trace_writer.h b/src/tracing/node_trace_writer.h
index e45d85ad409f34..b7bdc5ac672ad9 100644
--- a/src/tracing/node_trace_writer.h
+++ b/src/tracing/node_trace_writer.h
@@ -61,6 +61,7 @@ class NodeTraceWriter : public TraceWriter {
   int total_traces_ = 0;
   int file_num_ = 0;
   std::ostringstream stream_;
+  TraceWriter* json_trace_writer_;
   uv_pipe_t trace_file_pipe_;
 };

From 5edf5f25b2099706feac0a7586235baaa09727cc Mon Sep 17 00:00:00 2001
From: Matt Loring
Date: Fri, 23 Sep 2016 11:57:40 -0700
Subject: [PATCH 04/22] Clean up compile warnings + Fixes
---
 src/tracing/node_trace_buffer.cc   |  2 +-
 src/tracing/trace_config_parser.cc | 25 ++++++++++++++++---------
 src/tracing/trace_config_parser.h  |  6 ------
 src/tracing/trace_event.h          |  4 ++++
 4 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/src/tracing/node_trace_buffer.cc b/src/tracing/node_trace_buffer.cc
index 01d33e7f1b1365..07541b6089400b 100644
--- a/src/tracing/node_trace_buffer.cc
+++ b/src/tracing/node_trace_buffer.cc
@@ -87,7 +87,7 @@ void InternalTraceBuffer::ExtractHandle(
 NodeTraceBuffer::NodeTraceBuffer(size_t max_chunks,
     NodeTraceWriter* trace_writer, uv_loop_t* tracing_loop)
-    : trace_writer_(trace_writer), tracing_loop_(tracing_loop),
+    : tracing_loop_(tracing_loop), trace_writer_(trace_writer),
       buffer1_(max_chunks, trace_writer, this),
       buffer2_(max_chunks, trace_writer, this) {
   current_buf_.store(&buffer1_);
diff --git a/src/tracing/trace_config_parser.cc b/src/tracing/trace_config_parser.cc
index 626eeec81f0576..c18b17a0bb4955 100644
--- a/src/tracing/trace_config_parser.cc
+++ b/src/tracing/trace_config_parser.cc
@@ -5,17 +5,24 @@
 namespace node {
 namespace tracing {

+using v8::Array;
+using v8::Boolean;
+using v8::HandleScope;
+using v8::JSON;
+using v8::NewStringType;
+using v8::String;
+
 // String options that can be used to initialize TraceOptions.
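// The constants below name the keys recognized in a --trace-config file.
// A sketch of feeding the parser an inline JSON string equivalent to such
// a file; "v8.gc" is an illustrative category name, not from this patch:
#include "libplatform/v8-tracing.h"
#include "tracing/trace_config_parser.h"

void ConfigureTracing(v8::Isolate* isolate,
                      v8::platform::tracing::TraceConfig* trace_config) {
  const char* json =
      "{\"record_mode\": \"record-continuously\","
      " \"included_categories\": [\"v8\", \"node\"],"
      " \"excluded_categories\": [\"v8.gc\"]}";
  node::tracing::TraceConfigParser::FillTraceConfig(isolate, trace_config,
                                                    json);
}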
-const char* kRecordUntilFull = "record-until-full"; -const char* kRecordContinuously = "record-continuously"; -const char* kRecordAsMuchAsPossible = "record-as-much-as-possible"; +static const char* kRecordUntilFull = "record-until-full"; +static const char* kRecordContinuously = "record-continuously"; +static const char* kRecordAsMuchAsPossible = "record-as-much-as-possible"; -const char* kRecordModeParam = "record_mode"; -const char* kEnableSamplingParam = "enable_sampling"; -const char* kEnableSystraceParam = "enable_systrace"; -const char* kEnableArgumentFilterParam = "enable_argument_filter"; -const char* kIncludedCategoriesParam = "included_categories"; -const char* kExcludedCategoriesParam = "excluded_categories"; +static const char* kRecordModeParam = "record_mode"; +static const char* kEnableSamplingParam = "enable_sampling"; +static const char* kEnableSystraceParam = "enable_systrace"; +static const char* kEnableArgumentFilterParam = "enable_argument_filter"; +static const char* kIncludedCategoriesParam = "included_categories"; +static const char* kExcludedCategoriesParam = "excluded_categories"; void TraceConfigParser::FillTraceConfig( Isolate* isolate, TraceConfig* trace_config, const char* json_str) { diff --git a/src/tracing/trace_config_parser.h b/src/tracing/trace_config_parser.h index 112e438deaf6ba..04bf070dde7e36 100644 --- a/src/tracing/trace_config_parser.h +++ b/src/tracing/trace_config_parser.h @@ -9,17 +9,11 @@ namespace tracing { using v8::platform::tracing::TraceConfig; using v8::platform::tracing::TraceRecordMode; -using v8::Array; -using v8::Boolean; using v8::Context; -using v8::HandleScope; using v8::Isolate; -using v8::JSON; using v8::Local; -using v8::NewStringType; using v8::Object; using v8::Value; -using v8::String; class TraceConfigParser { public: diff --git a/src/tracing/trace_event.h b/src/tracing/trace_event.h index b086a350071681..bd0279ca2c2dca 100644 --- a/src/tracing/trace_event.h +++ b/src/tracing/trace_event.h @@ -1,3 +1,7 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ #ifndef SRC_TRACING_TRACE_EVENT_H_ #define SRC_TRACING_TRACE_EVENT_H_ From be497544d3eec998d9caad0c5342dad56e9c2788 Mon Sep 17 00:00:00 2001 From: Kelvin Jin Date: Tue, 25 Oct 2016 15:07:31 -0700 Subject: [PATCH 05/22] Fix Trace Event synchronization issues (#5) * Fixed writer segfault condition & changed buffer swap logic * Fixed premature trace thread exiting and num_write_requests race condition * Add full constraint for buffer flushing * Added async handle destruction CV in NodeTraceBuffer * Trigger write regardless of stream length * Moved ExitSignalCb ScopedLock objects to top of scope * Make current_seq_number a property of internal buffers, and other commment addressing --- src/tracing/agent.cc | 10 ++- src/tracing/node_trace_buffer.cc | 120 +++++++++++++++++-------------- src/tracing/node_trace_buffer.h | 23 ++++-- src/tracing/node_trace_writer.cc | 26 ++++--- src/tracing/node_trace_writer.h | 7 +- 5 files changed, 111 insertions(+), 75 deletions(-) diff --git a/src/tracing/agent.cc b/src/tracing/agent.cc index aa0b5b1a045933..8fdd23f6ce387c 100644 --- a/src/tracing/agent.cc +++ b/src/tracing/agent.cc @@ -16,9 +16,6 @@ void Agent::Start(v8::Platform* platform, const char* trace_config_file) { int err = uv_loop_init(&tracing_loop_); CHECK_EQ(err, 0); - err = uv_thread_create(&thread_, ThreadCb, this); - CHECK_EQ(err, 0); - NodeTraceWriter* trace_writer = new NodeTraceWriter(&tracing_loop_); TraceBuffer* trace_buffer = new NodeTraceBuffer( NodeTraceBuffer::kBufferChunks, trace_writer, &tracing_loop_); @@ -36,6 +33,13 @@ void Agent::Start(v8::Platform* platform, const char* trace_config_file) { trace_config->AddIncludedCategory("v8"); trace_config->AddIncludedCategory("node"); } + + // This thread should be created *after* async handles are created + // (within NodeTraceWriter and NodeTraceBuffer constructors). + // Otherwise the thread could shut down prematurely. + err = uv_thread_create(&thread_, ThreadCb, this); + CHECK_EQ(err, 0); + tracing_controller_->Initialize(trace_buffer); tracing_controller_->StartTracing(trace_config); v8::platform::SetTracingController(platform, tracing_controller_); diff --git a/src/tracing/node_trace_buffer.cc b/src/tracing/node_trace_buffer.cc index 07541b6089400b..640d437f7f5c61 100644 --- a/src/tracing/node_trace_buffer.cc +++ b/src/tracing/node_trace_buffer.cc @@ -3,30 +3,17 @@ namespace node { namespace tracing { -const double InternalTraceBuffer::kFlushThreshold = 0.75; - -InternalTraceBuffer::InternalTraceBuffer(size_t max_chunks, +InternalTraceBuffer::InternalTraceBuffer(size_t max_chunks, uint32_t id, NodeTraceWriter* trace_writer, NodeTraceBuffer* external_buffer) - : max_chunks_(max_chunks), trace_writer_(trace_writer), external_buffer_(external_buffer) { + : id_(id), flushing_(false), max_chunks_(max_chunks), + trace_writer_(trace_writer), external_buffer_(external_buffer) { chunks_.resize(max_chunks); } TraceObject* InternalTraceBuffer::AddTraceEvent(uint64_t* handle) { Mutex::ScopedLock scoped_lock(mutex_); - // If the buffer usage exceeds kFlushThreshold, attempt to perform a flush. - // This number should be customizable. - if (total_chunks_ >= max_chunks_ * kFlushThreshold) { - mutex_.Unlock(); - external_buffer_->Flush(false); - mutex_.Lock(); - } // Create new chunk if last chunk is full or there is no chunk. if (total_chunks_ == 0 || chunks_[total_chunks_ - 1]->IsFull()) { - if (total_chunks_ == max_chunks_) { - // There is no more space to store more trace events. 
- *handle = MakeHandle(0, 0, 0); - return nullptr; - } auto& chunk = chunks_[total_chunks_++]; if (chunk) { chunk->Reset(current_chunk_seq_++); @@ -43,14 +30,22 @@ TraceObject* InternalTraceBuffer::AddTraceEvent(uint64_t* handle) { TraceObject* InternalTraceBuffer::GetEventByHandle(uint64_t handle) { Mutex::ScopedLock scoped_lock(mutex_); + if (handle == 0) { + // A handle value of zero never has a trace event associated with it. + return NULL; + } size_t chunk_index, event_index; - uint32_t chunk_seq; - ExtractHandle(handle, &chunk_index, &chunk_seq, &event_index); - if (chunk_index >= total_chunks_) { + uint32_t buffer_id, chunk_seq; + ExtractHandle(handle, &buffer_id, &chunk_index, &chunk_seq, &event_index); + if (buffer_id != id_ || chunk_index >= total_chunks_) { + // Either the chunk belongs to the other buffer, or is outside the current + // range of chunks loaded in memory (the latter being true suggests that + // the chunk has already been flushed and is no longer in memory.) return NULL; } auto& chunk = chunks_[chunk_index]; if (chunk->seq() != chunk_seq) { + // Chunk is no longer in memory. return NULL; } return chunk->GetEventAt(event_index); @@ -59,26 +54,32 @@ TraceObject* InternalTraceBuffer::GetEventByHandle(uint64_t handle) { void InternalTraceBuffer::Flush(bool blocking) { { Mutex::ScopedLock scoped_lock(mutex_); - for (size_t i = 0; i < total_chunks_; ++i) { - auto& chunk = chunks_[i]; - for (size_t j = 0; j < chunk->size(); ++j) { - trace_writer_->AppendTraceEvent(chunk->GetEventAt(j)); + if (total_chunks_ > 0) { + flushing_ = true; + for (size_t i = 0; i < total_chunks_; ++i) { + auto& chunk = chunks_[i]; + for (size_t j = 0; j < chunk->size(); ++j) { + trace_writer_->AppendTraceEvent(chunk->GetEventAt(j)); + } } + total_chunks_ = 0; + flushing_ = false; } - total_chunks_ = 0; } trace_writer_->Flush(blocking); } uint64_t InternalTraceBuffer::MakeHandle( size_t chunk_index, uint32_t chunk_seq, size_t event_index) const { - return static_cast(chunk_seq) * Capacity() + - chunk_index * TraceBufferChunk::kChunkSize + event_index; + return ((static_cast(chunk_seq) * Capacity() + + chunk_index * TraceBufferChunk::kChunkSize + event_index) << 1) + id_; } void InternalTraceBuffer::ExtractHandle( - uint64_t handle, size_t* chunk_index, + uint64_t handle, uint32_t* buffer_id, size_t* chunk_index, uint32_t* chunk_seq, size_t* event_index) const { + *buffer_id = static_cast(handle & 0x1); + handle >>= 1; *chunk_seq = static_cast(handle / Capacity()); size_t indices = handle % Capacity(); *chunk_index = indices / TraceBufferChunk::kChunkSize; @@ -88,14 +89,14 @@ void InternalTraceBuffer::ExtractHandle( NodeTraceBuffer::NodeTraceBuffer(size_t max_chunks, NodeTraceWriter* trace_writer, uv_loop_t* tracing_loop) : tracing_loop_(tracing_loop), trace_writer_(trace_writer), - buffer1_(max_chunks, trace_writer, this), - buffer2_(max_chunks, trace_writer, this) { + buffer1_(max_chunks, 0, trace_writer, this), + buffer2_(max_chunks, 1, trace_writer, this) { current_buf_.store(&buffer1_); flush_signal_.data = this; int err = uv_async_init(tracing_loop_, &flush_signal_, NonBlockingFlushSignalCb); CHECK_EQ(err, 0); - + exit_signal_.data = this; err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb); CHECK_EQ(err, 0); @@ -103,9 +104,21 @@ NodeTraceBuffer::NodeTraceBuffer(size_t max_chunks, NodeTraceBuffer::~NodeTraceBuffer() { uv_async_send(&exit_signal_); + Mutex::ScopedLock scoped_lock(exit_mutex_); + while(!exited_) { + exit_cond_.Wait(scoped_lock); + } } TraceObject* 
NodeTraceBuffer::AddTraceEvent(uint64_t* handle) {
+  // If the buffer is full, attempt to perform a flush.
+  if (!TryLoadAvailableBuffer()) {
+    // Assign a value of zero as the trace event handle.
+    // This is equivalent to calling InternalTraceBuffer::MakeHandle(0, 0, 0),
+    // and will cause GetEventByHandle to return NULL if passed as an argument.
+    *handle = 0;
+    return nullptr;
+  }
   return current_buf_.load()->AddTraceEvent(handle);
 }

@@ -113,47 +126,48 @@ TraceObject* NodeTraceBuffer::GetEventByHandle(uint64_t handle) {
   return current_buf_.load()->GetEventByHandle(handle);
 }

-// TODO: This function is here to match the parent class signature,
-// but it's not expressive enough because it doesn't allow us to specify
-// whether it should block or not. The parent class signature should be changed
-// in the future to include this parameter, and when that's done, this
-// function should be removed.
 bool NodeTraceBuffer::Flush() {
-  return Flush(true);
+  buffer1_.Flush(true);
+  buffer2_.Flush(true);
+  return true;
 }

-void NodeTraceBuffer::FlushPrivate(bool blocking) {
+// Attempts to set current_buf_ such that it references a buffer that can
+// write at least one trace event. If both buffers are unavailable this
+// method returns false; otherwise it returns true.
+bool NodeTraceBuffer::TryLoadAvailableBuffer() {
   InternalTraceBuffer* prev_buf = current_buf_.load();
-  if (prev_buf == &buffer1_) {
-    current_buf_.store(&buffer2_);
-  } else {
-    current_buf_.store(&buffer1_);
+  if (prev_buf->IsFull()) {
+    uv_async_send(&flush_signal_);  // trigger flush on a separate thread
+    InternalTraceBuffer* other_buf = prev_buf == &buffer1_ ?
+      &buffer2_ : &buffer1_;
+    if (!other_buf->IsFull()) {
+      current_buf_.store(other_buf);
+    } else {
+      return false;
+    }
   }
-  // Flush the other buffer.
-  // Note that concurrently, we can AddTraceEvent to the current buffer.
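// A standalone sketch of the swap policy TryLoadAvailableBuffer implements:
// when the active buffer fills up, request an asynchronous flush and move
// to the other buffer only if it still has room; if both are full, the
// caller drops the event. Types and names here are illustrative.
#include <atomic>
#include <cstdio>

struct Buf { bool full = false; };

struct DoubleBuffer {
  Buf a, b;
  std::atomic<Buf*> current{&a};

  void RequestFlushOnTracingThread() {
    // In the real code this is uv_async_send(&flush_signal_).
  }

  bool TryLoadAvailableBuffer() {
    Buf* prev = current.load();
    if (prev->full) {
      RequestFlushOnTracingThread();
      Buf* other = (prev == &a) ? &b : &a;
      if (other->full) return false;  // both full: event will be dropped
      current.store(other);
    }
    return true;
  }
};

int main() {
  DoubleBuffer db;
  db.a.full = true;
  std::printf("%d\n", db.TryLoadAvailableBuffer());  // 1: swapped to b
  db.b.full = true;
  std::printf("%d\n", db.TryLoadAvailableBuffer());  // 0: both buffers full
  return 0;
}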
- prev_buf->Flush(blocking); + return true; } // static void NodeTraceBuffer::NonBlockingFlushSignalCb(uv_async_t* signal) { NodeTraceBuffer* buffer = reinterpret_cast(signal->data); - buffer->FlushPrivate(false); -} - -bool NodeTraceBuffer::Flush(bool blocking) { - if (blocking) { - FlushPrivate(true); - } else { - uv_async_send(&flush_signal_); + if (buffer->buffer1_.IsFull() && !buffer->buffer1_.IsFlushing()) { + buffer->buffer1_.Flush(false); + } + if (buffer->buffer2_.IsFull() && !buffer->buffer2_.IsFlushing()) { + buffer->buffer2_.Flush(false); } - return true; } // static void NodeTraceBuffer::ExitSignalCb(uv_async_t* signal) { NodeTraceBuffer* buffer = reinterpret_cast(signal->data); + Mutex::ScopedLock scoped_lock(buffer->exit_mutex_); uv_close(reinterpret_cast(&buffer->flush_signal_), nullptr); uv_close(reinterpret_cast(&buffer->exit_signal_), nullptr); + buffer->exited_ = true; + buffer->exit_cond_.Signal(scoped_lock); } } // namespace tracing diff --git a/src/tracing/node_trace_buffer.h b/src/tracing/node_trace_buffer.h index a429b90314d5c0..619799fdb21978 100644 --- a/src/tracing/node_trace_buffer.h +++ b/src/tracing/node_trace_buffer.h @@ -19,29 +19,36 @@ class NodeTraceBuffer; class InternalTraceBuffer { public: - InternalTraceBuffer(size_t max_chunks, NodeTraceWriter* trace_writer, + InternalTraceBuffer(size_t max_chunks, uint32_t id, + NodeTraceWriter* trace_writer, NodeTraceBuffer* external_buffer); TraceObject* AddTraceEvent(uint64_t* handle); TraceObject* GetEventByHandle(uint64_t handle); void Flush(bool blocking); - - static const double kFlushThreshold; + bool IsFull() const { + return total_chunks_ == max_chunks_ && chunks_[total_chunks_ - 1]->IsFull(); + } + bool IsFlushing() const { + return flushing_; + } private: uint64_t MakeHandle(size_t chunk_index, uint32_t chunk_seq, size_t event_index) const; - void ExtractHandle(uint64_t handle, size_t* chunk_index, + void ExtractHandle(uint64_t handle, uint32_t* buffer_id, size_t* chunk_index, uint32_t* chunk_seq, size_t* event_index) const; size_t Capacity() const { return max_chunks_ * TraceBufferChunk::kChunkSize; } Mutex mutex_; + bool flushing_; size_t max_chunks_; NodeTraceWriter* trace_writer_; NodeTraceBuffer* external_buffer_; std::vector> chunks_; size_t total_chunks_ = 0; uint32_t current_chunk_seq_ = 1; + uint32_t id_; }; class NodeTraceBuffer : public TraceBuffer { @@ -53,18 +60,22 @@ class NodeTraceBuffer : public TraceBuffer { TraceObject* AddTraceEvent(uint64_t* handle) override; TraceObject* GetEventByHandle(uint64_t handle) override; bool Flush() override; - bool Flush(bool blocking); static const size_t kBufferChunks = 1024; private: - void FlushPrivate(bool blocking); + bool TryLoadAvailableBuffer(); static void NonBlockingFlushSignalCb(uv_async_t* signal); static void ExitSignalCb(uv_async_t* signal); uv_loop_t* tracing_loop_; uv_async_t flush_signal_; uv_async_t exit_signal_; + bool exited_ = false; + // Used exclusively for exit logic. + Mutex exit_mutex_; + // Used to wait until async handles have been closed. + ConditionVariable exit_cond_; std::unique_ptr trace_writer_; // TODO: Change std::atomic to something less contentious. 
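// The MakeHandle/ExtractHandle pair in this patch tags the low bit of every
// event handle with the owning buffer's id (0 or 1) and shifts the rest of
// the encoded position left by one. A round-trip sketch with stand-in sizes
// (the real values come from TraceBufferChunk::kChunkSize and kBufferChunks):
#include <cassert>
#include <cstdint>
#include <cstdio>

static const uint64_t kChunkSize = 64;    // assumed events per chunk
static const uint64_t kMaxChunks = 1024;  // cf. kBufferChunks

static uint64_t Capacity() { return kMaxChunks * kChunkSize; }

static uint64_t MakeHandle(uint32_t id, size_t chunk_index,
                           uint32_t chunk_seq, size_t event_index) {
  return ((static_cast<uint64_t>(chunk_seq) * Capacity() +
           chunk_index * kChunkSize + event_index) << 1) + id;
}

static void ExtractHandle(uint64_t handle, uint32_t* id, size_t* chunk_index,
                          uint32_t* chunk_seq, size_t* event_index) {
  *id = static_cast<uint32_t>(handle & 0x1);
  handle >>= 1;
  *chunk_seq = static_cast<uint32_t>(handle / Capacity());
  uint64_t indices = handle % Capacity();
  *chunk_index = static_cast<size_t>(indices / kChunkSize);
  *event_index = static_cast<size_t>(indices % kChunkSize);
}

int main() {
  uint64_t handle = MakeHandle(1, 17, 3, 42);
  uint32_t id, seq;
  size_t chunk, event;
  ExtractHandle(handle, &id, &chunk, &seq, &event);
  assert(id == 1 && chunk == 17 && seq == 3 && event == 42);
  std::printf("buffer %u, chunk %zu, seq %u, event %zu\n",
              id, chunk, seq, event);
  return 0;
}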
std::atomic current_buf_; diff --git a/src/tracing/node_trace_writer.cc b/src/tracing/node_trace_writer.cc index c50b43cc6af5ee..16bdd31af15d5e 100644 --- a/src/tracing/node_trace_writer.cc +++ b/src/tracing/node_trace_writer.cc @@ -49,6 +49,10 @@ NodeTraceWriter::~NodeTraceWriter() { uv_close(reinterpret_cast(&trace_file_pipe_), nullptr); } uv_async_send(&exit_signal_); + Mutex::ScopedLock scoped_lock(request_mutex_); + while(!exited_) { + exit_cond_.Wait(scoped_lock); + } } void NodeTraceWriter::OpenNewFileForStreaming() { @@ -83,7 +87,6 @@ void NodeTraceWriter::AppendTraceEvent(TraceObject* trace_event) { void NodeTraceWriter::FlushPrivate() { std::string str; int highest_request_id; - bool should_write = false; { Mutex::ScopedLock stream_scoped_lock(stream_mutex_); if (total_traces_ >= kTracesPerFile) { @@ -96,15 +99,12 @@ void NodeTraceWriter::FlushPrivate() { str = stream_.str(); stream_.str(""); stream_.clear(); - if (str.length() > 0 && fd_ != -1) { - Mutex::ScopedLock request_scoped_lock(request_mutex_); - highest_request_id = num_write_requests_++; - should_write = true; - } } - if (should_write) { - WriteToFile(str, highest_request_id); + { + Mutex::ScopedLock request_scoped_lock(request_mutex_); + highest_request_id = num_write_requests_; } + WriteToFile(str, highest_request_id); } void NodeTraceWriter::FlushSignalCb(uv_async_t* signal) { @@ -119,10 +119,10 @@ void NodeTraceWriter::Flush() { } void NodeTraceWriter::Flush(bool blocking) { + Mutex::ScopedLock scoped_lock(request_mutex_); + int request_id = ++num_write_requests_; int err = uv_async_send(&flush_signal_); CHECK_EQ(err, 0); - Mutex::ScopedLock scoped_lock(request_mutex_); - int request_id = num_write_requests_++; if (blocking) { // Wait until data associated with this request id has been written to disk. // This guarantees that data from all earlier requests have also been @@ -148,9 +148,10 @@ void NodeTraceWriter::WriteToFile(std::string str, int highest_request_id) { request_mutex_.Unlock(); // TODO: Is the return value of back() guaranteed to always have the // same address? - uv_write(reinterpret_cast(write_req), + int err = uv_write(reinterpret_cast(write_req), reinterpret_cast(&trace_file_pipe_), &uv_buf, 1, WriteCb); + CHECK_EQ(err, 0); } void NodeTraceWriter::WriteCb(uv_write_t* req, int status) { @@ -170,8 +171,11 @@ void NodeTraceWriter::WriteCb(uv_write_t* req, int status) { // static void NodeTraceWriter::ExitSignalCb(uv_async_t* signal) { NodeTraceWriter* trace_writer = static_cast(signal->data); + Mutex::ScopedLock scoped_lock(trace_writer->request_mutex_); uv_close(reinterpret_cast(&trace_writer->flush_signal_), nullptr); uv_close(reinterpret_cast(&trace_writer->exit_signal_), nullptr); + trace_writer->exited_ = true; + trace_writer->exit_cond_.Signal(scoped_lock); } } // namespace tracing diff --git a/src/tracing/node_trace_writer.h b/src/tracing/node_trace_writer.h index b7bdc5ac672ad9..988ed2bb7e7f64 100644 --- a/src/tracing/node_trace_writer.h +++ b/src/tracing/node_trace_writer.h @@ -24,7 +24,7 @@ class NodeTraceWriter : public TraceWriter { void Flush() override; void Flush(bool blocking); - static const int kTracesPerFile = 1 << 20; + static const int kTracesPerFile = 1 << 19; private: struct WriteRequest { @@ -54,6 +54,8 @@ class NodeTraceWriter : public TraceWriter { // Allows blocking calls to Flush() to wait on a condition for // trace events to be written to disk. ConditionVariable request_cond_; + // Used to wait until async handles have been closed. 
+ ConditionVariable exit_cond_; int fd_ = -1; std::queue write_req_queue_; int num_write_requests_ = 0; @@ -63,9 +65,10 @@ class NodeTraceWriter : public TraceWriter { std::ostringstream stream_; TraceWriter* json_trace_writer_; uv_pipe_t trace_file_pipe_; + bool exited_ = false; }; } // namespace tracing } // namespace node -#endif // SRC_NODE_TRACE_WRITER_H_ \ No newline at end of file +#endif // SRC_NODE_TRACE_WRITER_H_ From 8f19f944529457e53935fb0b90b76cc6b7335b70 Mon Sep 17 00:00:00 2001 From: Matthew Loring Date: Wed, 26 Oct 2016 19:19:27 -0400 Subject: [PATCH 06/22] Remove dependency on base trace_event_common.h (#6) --- node.gyp | 1 - src/tracing/node_trace_writer.cc | 1 - src/tracing/trace_event.h | 1119 +++++++++++++++++++++++++++++- 3 files changed, 1118 insertions(+), 3 deletions(-) diff --git a/node.gyp b/node.gyp index 31c1a79719c18c..94ed64604f3d1c 100644 --- a/node.gyp +++ b/node.gyp @@ -136,7 +136,6 @@ 'src', 'tools/msvs/genfiles', 'deps/uv/src/ares', - 'deps/v8/base/trace_event/common', '<(SHARED_INTERMEDIATE_DIR)', # for node_natives.h ], diff --git a/src/tracing/node_trace_writer.cc b/src/tracing/node_trace_writer.cc index 16bdd31af15d5e..08be67235b9c67 100644 --- a/src/tracing/node_trace_writer.cc +++ b/src/tracing/node_trace_writer.cc @@ -2,7 +2,6 @@ #include -#include "trace_event_common.h" #include "util.h" namespace node { diff --git a/src/tracing/trace_event.h b/src/tracing/trace_event.h index bd0279ca2c2dca..a035fd51500bb9 100644 --- a/src/tracing/trace_event.h +++ b/src/tracing/trace_event.h @@ -7,13 +7,1130 @@ #include -#include "trace_event_common.h" #include "v8-platform.h" // This header file defines implementation details of how the trace macros in // trace_event_common.h collect and store trace events. Anything not // implementation-specific should go in trace_macros_common.h instead of here. +// From v8/base/trace_event/common/trace_event_common.h + +// This header file defines the set of trace_event macros without specifying +// how the events actually get collected and stored. If you need to expose trace +// events to some other universe, you can copy-and-paste this file as well as +// trace_event.h, modifying the macros contained there as necessary for the +// target platform. The end result is that multiple libraries can funnel events +// through to a shared trace event collector. + +// IMPORTANT: To avoid conflicts, if you need to modify this file for a library, +// land your change in base/ first, and then copy-and-paste it. + +// Trace events are for tracking application performance and resource usage. +// Macros are provided to track: +// Begin and end of function calls +// Counters +// +// Events are issued against categories. Whereas LOG's +// categories are statically defined, TRACE categories are created +// implicitly with a string. For example: +// TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent", +// TRACE_EVENT_SCOPE_THREAD) +// +// It is often the case that one trace may belong in multiple categories at the +// same time. The first argument to the trace can be a comma-separated list of +// categories, forming a category group, like: +// +// TRACE_EVENT_INSTANT0("input,views", "OnMouseOver", TRACE_EVENT_SCOPE_THREAD) +// +// We can enable/disable tracing of OnMouseOver by enabling/disabling either +// category. 
+// +// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope: +// TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly") +// doSomethingCostly() +// TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly") +// Note: our tools can't always determine the correct BEGIN/END pairs unless +// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you +// need them to be in separate scopes. +// +// A common use case is to trace entire function scopes. This +// issues a trace BEGIN and END automatically: +// void doSomethingCostly() { +// TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly"); +// ... +// } +// +// Additional parameters can be associated with an event: +// void doSomethingCostly2(int howMuch) { +// TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly", +// "howMuch", howMuch); +// ... +// } +// +// The trace system will automatically add to this information the +// current process id, thread id, and a timestamp in microseconds. +// +// To trace an asynchronous procedure such as an IPC send/receive, use +// ASYNC_BEGIN and ASYNC_END: +// [single threaded sender code] +// static int send_count = 0; +// ++send_count; +// TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count); +// Send(new MyMessage(send_count)); +// [receive code] +// void OnMyMessage(send_count) { +// TRACE_EVENT_ASYNC_END0("ipc", "message", send_count); +// } +// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs. +// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process. +// Pointers can be used for the ID parameter, and they will be mangled +// internally so that the same pointer on two different processes will not +// match. For example: +// class MyTracedClass { +// public: +// MyTracedClass() { +// TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this); +// } +// ~MyTracedClass() { +// TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this); +// } +// } +// +// Trace event also supports counters, which is a way to track a quantity +// as it varies over time. Counters are created with the following macro: +// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue); +// +// Counters are process-specific. The macro itself can be issued from any +// thread, however. +// +// Sometimes, you want to track two counters at once. You can do this with two +// counter macros: +// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]); +// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]); +// Or you can do it with a combined macro: +// TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter", +// "bytesPinned", g_myCounterValue[0], +// "bytesAllocated", g_myCounterValue[1]); +// This indicates to the tracing UI that these counters should be displayed +// in a single graph, as a summed area chart. +// +// Since counters are in a global namespace, you may want to disambiguate with a +// unique ID, by using the TRACE_COUNTER_ID* variations. +// +// By default, trace collection is compiled in, but turned off at runtime. +// Collecting trace data is the responsibility of the embedding +// application. In Chrome's case, navigating to about:tracing will turn on +// tracing and display data collected across all active processes. +// +// +// Memory scoping note: +// Tracing copies the pointers, not the string content, of the strings passed +// in for category_group, name, and arg_names. Thus, the following code will +// cause problems: +// char* str = strdup("importantName"); +// TRACE_EVENT_INSTANT0("SUBSYSTEM", str); // BAD! 
+// free(str); // Trace system now has dangling pointer +// +// To avoid this issue with the |name| and |arg_name| parameters, use the +// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead. +// Notes: The category must always be in a long-lived char* (i.e. static const). +// The |arg_values|, when used, are always deep copied with the _COPY +// macros. +// +// When are string argument values copied: +// const char* arg_values are only referenced by default: +// TRACE_EVENT1("category", "name", +// "arg1", "literal string is only referenced"); +// Use TRACE_STR_COPY to force copying of a const char*: +// TRACE_EVENT1("category", "name", +// "arg1", TRACE_STR_COPY("string will be copied")); +// std::string arg_values are always copied: +// TRACE_EVENT1("category", "name", +// "arg1", std::string("string will be copied")); +// +// +// Convertable notes: +// Converting a large data type to a string can be costly. To help with this, +// the trace framework provides an interface ConvertableToTraceFormat. If you +// inherit from it and implement the AppendAsTraceFormat method the trace +// framework will call back to your object to convert a trace output time. This +// means, if the category for the event is disabled, the conversion will not +// happen. +// +// class MyData : public base::trace_event::ConvertableToTraceFormat { +// public: +// MyData() {} +// void AppendAsTraceFormat(std::string* out) const override { +// out->append("{\"foo\":1}"); +// } +// private: +// ~MyData() override {} +// DISALLOW_COPY_AND_ASSIGN(MyData); +// }; +// +// TRACE_EVENT1("foo", "bar", "data", +// std::unique_ptr(new MyData())); +// +// The trace framework will take ownership if the passed pointer and it will +// be free'd when the trace buffer is flushed. +// +// Note, we only do the conversion when the buffer is flushed, so the provided +// data object should not be modified after it's passed to the trace framework. +// +// +// Thread Safety: +// A thread safe singleton and mutex are used for thread safety. Category +// enabled flags are used to limit the performance impact when the system +// is not enabled. +// +// TRACE_EVENT macros first cache a pointer to a category. The categories are +// statically allocated and safe at all times, even after exit. Fetching a +// category is protected by the TraceLog::lock_. Multiple threads initializing +// the static variable is safe, as they will be serialized by the lock and +// multiple calls will return the same pointer to the category. +// +// Then the category_group_enabled flag is checked. This is a unsigned char, and +// not intended to be multithread safe. It optimizes access to AddTraceEvent +// which is threadsafe internally via TraceLog::lock_. The enabled flag may +// cause some threads to incorrectly call or skip calling AddTraceEvent near +// the time of the system being enabled or disabled. This is acceptable as +// we tolerate some data loss while the system is being enabled/disabled and +// because AddTraceEvent is threadsafe internally and checks the enabled state +// again under lock. +// +// Without the use of these static category pointers and enabled flags all +// trace points would carry a significant performance cost of acquiring a lock +// and resolving the category. + +#if defined(TRACE_EVENT0) +#error "Another copy of this file has already been included." +#endif + +// This will mark the trace event as disabled by default. The user will need +// to explicitly enable the event. 
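// A usage sketch of the string-lifetime rules described above (assumes this
// header is included and a tracing controller has been installed; the
// category, name, and function are illustrative):
#include "tracing/trace_event.h"

#include <string>

void HandleRequest(const std::string& url) {
  // Category and name are string literals, so the trace system may keep
  // bare pointers to them. The url argument is built at runtime, so it
  // must be deep-copied via TRACE_STR_COPY.
  TRACE_EVENT1("node,http", "HandleRequest",
               "url", TRACE_STR_COPY(url.c_str()));
  // ... work measured by the scoped BEGIN/END pair ...
}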
+#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name + +// Records a pair of begin and end events called "name" for the current +// scope, with 0, 1 or 2 associated arguments. If the category is not +// enabled, then this does nothing. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. +#define TRACE_EVENT0(category_group, name) \ + INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name) +#define TRACE_EVENT_WITH_FLOW0(category_group, name, bind_id, flow_flags) \ + INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \ + flow_flags) +#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val) +#define TRACE_EVENT_WITH_FLOW1(category_group, name, bind_id, flow_flags, \ + arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \ + flow_flags, arg1_name, arg1_val) +#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, \ + arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val, \ + arg2_name, arg2_val) +#define TRACE_EVENT_WITH_FLOW2(category_group, name, bind_id, flow_flags, \ + arg1_name, arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \ + flow_flags, arg1_name, arg1_val, \ + arg2_name, arg2_val) + +// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not +// included in official builds. + +#if OFFICIAL_BUILD +#undef TRACING_IS_OFFICIAL_BUILD +#define TRACING_IS_OFFICIAL_BUILD 1 +#elif !defined(TRACING_IS_OFFICIAL_BUILD) +#define TRACING_IS_OFFICIAL_BUILD 0 +#endif + +#if TRACING_IS_OFFICIAL_BUILD +#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0 +#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \ + (void)0 +#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + (void)0 +#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0 +#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \ + arg1_val) \ + (void)0 +#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + (void)0 +#else +#define UNSHIPPED_TRACE_EVENT0(category_group, name) \ + TRACE_EVENT0(category_group, name) +#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \ + TRACE_EVENT1(category_group, name, arg1_name, arg1_val) +#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val) +#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \ + TRACE_EVENT_INSTANT0(category_group, name, scope) +#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \ + arg1_val) \ + TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) +#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \ + arg2_name, arg2_val) +#endif + +// Records a single event called "name" immediately, with 0, 1 or 2 +// associated arguments. If the category is not enabled, then this +// does nothing. +// - category and name strings must have application lifetime (statics or +// literals). 
They may not include " chars. +#define TRACE_EVENT_INSTANT0(category_group, name, scope) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \ + TRACE_EVENT_FLAG_NONE | scope) +#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \ + TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val) +#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \ + TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val, \ + arg2_name, arg2_val) +#define TRACE_EVENT_COPY_INSTANT0(category_group, name, scope) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \ + TRACE_EVENT_FLAG_COPY | scope) +#define TRACE_EVENT_COPY_INSTANT1(category_group, name, scope, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \ + TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val) +#define TRACE_EVENT_COPY_INSTANT2(category_group, name, scope, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \ + TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val, \ + arg2_name, arg2_val) + +#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope, \ + timestamp) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_INSTANT, category_group, name, 0, 0, timestamp, \ + TRACE_EVENT_FLAG_NONE | scope) + +// Syntactic sugars for the sampling tracing in the main thread. +#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \ + TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name) +#define TRACE_EVENT_GET_SAMPLING_STATE() \ + TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0) +#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \ + TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name) +#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(categoryAndName) \ + TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, categoryAndName) + +// Records a single BEGIN event called "name" immediately, with 0, 1 or 2 +// associated arguments. If the category is not enabled, then this +// does nothing. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. 
+#define TRACE_EVENT_BEGIN0(category_group, name) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \ + TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_BEGIN1(category_group, name, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val) +#define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \ + arg2_name, arg2_val) +#define TRACE_EVENT_COPY_BEGIN0(category_group, name) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \ + TRACE_EVENT_FLAG_COPY) +#define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val) +#define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \ + arg2_name, arg2_val) + +// Similar to TRACE_EVENT_BEGINx but with a custom |at| timestamp provided. +// - |id| is used to match the _BEGIN event with the _END event. +// Events are considered to match if their category_group, name and id values +// all match. |id| must either be a pointer or an integer value up to 64 bits. +// If it's a pointer, the bits will be xored with a hash of the process ID so +// that the same pointer on two different processes will not collide. +#define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \ + thread_id, timestamp) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \ + timestamp, TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0( \ + category_group, name, id, thread_id, timestamp) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \ + timestamp, TRACE_EVENT_FLAG_COPY) +#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP1( \ + category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \ + timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val) +#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP2( \ + category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \ + timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, \ + arg2_val) + +// Records a single END event for "name" immediately. If the category +// is not enabled, then this does nothing. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. 
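// A usage sketch for the manual pairs (BEGIN macros above, END macros just
// below): per the notes at the top of this header, keep both halves in the
// same scope, and reach for the ASYNC variants when an operation spans
// scopes or threads. Category and name are illustrative.
#include "tracing/trace_event.h"

void ProcessBatch() {
  TRACE_EVENT_BEGIN0("node", "ProcessBatch");
  // ... the region being measured ...
  TRACE_EVENT_END0("node", "ProcessBatch");  // matched by category and name
}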
+#define TRACE_EVENT_END0(category_group, name) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \ + TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val) +#define TRACE_EVENT_END2(category_group, name, arg1_name, arg1_val, arg2_name, \ + arg2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \ + arg2_name, arg2_val) +#define TRACE_EVENT_COPY_END0(category_group, name) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \ + TRACE_EVENT_FLAG_COPY) +#define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val) +#define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \ + arg2_name, arg2_val) + +#define TRACE_EVENT_MARK_WITH_TIMESTAMP1(category_group, name, timestamp, \ + arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val) + +#define TRACE_EVENT_COPY_MARK(category_group, name) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \ + TRACE_EVENT_FLAG_COPY) + +#define TRACE_EVENT_COPY_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \ + TRACE_EVENT_FLAG_COPY) + +// Similar to TRACE_EVENT_ENDx but with a custom |at| timestamp provided. +// - |id| is used to match the _BEGIN event with the _END event. +// Events are considered to match if their category_group, name and id values +// all match. |id| must either be a pointer or an integer value up to 64 bits. +// If it's a pointer, the bits will be xored with a hash of the process ID so +// that the same pointer on two different processes will not collide. 
+#define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \ + thread_id, timestamp) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \ + timestamp, TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0( \ + category_group, name, id, thread_id, timestamp) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \ + timestamp, TRACE_EVENT_FLAG_COPY) +#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP1( \ + category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \ + timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val) +#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP2( \ + category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \ + timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, \ + arg2_val) + +// Records the value of a counter called "name" immediately. Value +// must be representable as a 32 bit integer. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. +#define TRACE_COUNTER1(category_group, name, value) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \ + TRACE_EVENT_FLAG_NONE, "value", \ + static_cast(value)) +#define TRACE_COPY_COUNTER1(category_group, name, value) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \ + TRACE_EVENT_FLAG_COPY, "value", \ + static_cast(value)) + +// Records the values of a multi-parted counter called "name" immediately. +// The UI will treat value1 and value2 as parts of a whole, displaying their +// values as a stacked-bar chart. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. +#define TRACE_COUNTER2(category_group, name, value1_name, value1_val, \ + value2_name, value2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \ + TRACE_EVENT_FLAG_NONE, value1_name, \ + static_cast(value1_val), value2_name, \ + static_cast(value2_val)) +#define TRACE_COPY_COUNTER2(category_group, name, value1_name, value1_val, \ + value2_name, value2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \ + TRACE_EVENT_FLAG_COPY, value1_name, \ + static_cast(value1_val), value2_name, \ + static_cast(value2_val)) + +// Similar to TRACE_COUNTERx, but with a custom |timestamp| provided. +#define TRACE_COUNTER_WITH_TIMESTAMP1(category_group, name, timestamp, value) \ + INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \ + TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp, \ + TRACE_EVENT_FLAG_NONE, "value", static_cast(value)) + +#define TRACE_COUNTER_WITH_TIMESTAMP2(category_group, name, timestamp, \ + value1_name, value1_val, value2_name, \ + value2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \ + TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp, \ + TRACE_EVENT_FLAG_NONE, value1_name, static_cast(value1_val), \ + value2_name, static_cast(value2_val)) + +// Records the value of a counter called "name" immediately. Value +// must be representable as a 32 bit integer. 
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID1(category_group, name, id, value) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_NONE, "value", \
+                                   static_cast<int>(value))
+#define TRACE_COPY_COUNTER_ID1(category_group, name, id, value) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_COPY, "value", \
+                                   static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID2(category_group, name, id, value1_name, value1_val, \
+                          value2_name, value2_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_NONE, \
+                                   value1_name, static_cast<int>(value1_val), \
+                                   value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER_ID2(category_group, name, id, value1_name, \
+                               value1_val, value2_name, value2_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_COPY, \
+                                   value1_name, static_cast<int>(value1_val), \
+                                   value2_name, static_cast<int>(value2_val))
+
+// TRACE_EVENT_SAMPLE_* events are injected by the sampling profiler.
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP0(category_group, name, \
+                                                   thread_id, timestamp) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+      TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+      TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP1( \
+    category_group, name, thread_id, timestamp, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+      TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP2(category_group, name, \
+                                                   thread_id, timestamp, \
+                                                   arg1_name, arg1_val, \
+                                                   arg2_name, arg2_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+      TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// ASYNC_STEP_* APIs should be only used by legacy code. New code should
+// consider using NESTABLE_ASYNC_* APIs to describe substeps within an async
+// event.
+// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC +// events are considered to match if their category_group, name and id values +// all match. |id| must either be a pointer or an integer value up to 64 bits. +// If it's a pointer, the bits will be xored with a hash of the process ID so +// that the same pointer on two different processes will not collide. +// +// An asynchronous operation can consist of multiple phases. The first phase is +// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the +// ASYNC_STEP_INTO or ASYNC_STEP_PAST macros. The ASYNC_STEP_INTO macro will +// annotate the block following the call. The ASYNC_STEP_PAST macro will +// annotate the block prior to the call. Note that any particular event must use +// only STEP_INTO or STEP_PAST macros; they can not mix and match. When the +// operation completes, call ASYNC_END. +// +// An ASYNC trace typically occurs on a single thread (if not, they will only be +// drawn on the thread defined in the ASYNC_BEGIN event), but all events in that +// operation must use the same |name| and |id|. Each step can have its own +// args. +#define TRACE_EVENT_ASYNC_BEGIN0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_ASYNC_BEGIN1(category_group, name, id, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val) +#define TRACE_EVENT_ASYNC_BEGIN2(category_group, name, id, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val) +#define TRACE_EVENT_COPY_ASYNC_BEGIN0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_COPY) +#define TRACE_EVENT_COPY_ASYNC_BEGIN1(category_group, name, id, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val) +#define TRACE_EVENT_COPY_ASYNC_BEGIN2(category_group, name, id, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val) + +// Similar to TRACE_EVENT_ASYNC_BEGINx but with a custom |at| timestamp +// provided. 
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \ + timestamp) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \ + TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP1( \ + category_group, name, id, timestamp, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \ + TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \ + arg1_name, arg1_val) +#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2(category_group, name, id, \ + timestamp, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \ + TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \ + arg1_name, arg1_val, arg2_name, arg2_val) +#define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \ + timestamp) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \ + TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY) + +// Records a single ASYNC_STEP_INTO event for |step| immediately. If the +// category is not enabled, then this does nothing. The |name| and |id| must +// match the ASYNC_BEGIN event above. The |step| param identifies this step +// within the async event. This should be called at the beginning of the next +// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any +// ASYNC_STEP_PAST events. +#define TRACE_EVENT_ASYNC_STEP_INTO0(category_group, name, id, step) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, "step", step) +#define TRACE_EVENT_ASYNC_STEP_INTO1(category_group, name, id, step, \ + arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val) + +// Similar to TRACE_EVENT_ASYNC_STEP_INTOx but with a custom |at| timestamp +// provided. +#define TRACE_EVENT_ASYNC_STEP_INTO_WITH_TIMESTAMP0(category_group, name, id, \ + step, timestamp) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id, \ + TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \ + "step", step) + +// Records a single ASYNC_STEP_PAST event for |step| immediately. If the +// category is not enabled, then this does nothing. The |name| and |id| must +// match the ASYNC_BEGIN event above. The |step| param identifies this step +// within the async event. This should be called at the beginning of the next +// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any +// ASYNC_STEP_INTO events. +#define TRACE_EVENT_ASYNC_STEP_PAST0(category_group, name, id, step) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, "step", step) +#define TRACE_EVENT_ASYNC_STEP_PAST1(category_group, name, id, step, \ + arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_ASYNC_STEP_PAST, category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val) + +// Records a single ASYNC_END event for "name" immediately. 
If the category +// is not enabled, then this does nothing. +#define TRACE_EVENT_ASYNC_END0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_ASYNC_END1(category_group, name, id, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val) +#define TRACE_EVENT_ASYNC_END2(category_group, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val) +#define TRACE_EVENT_COPY_ASYNC_END0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_COPY) +#define TRACE_EVENT_COPY_ASYNC_END1(category_group, name, id, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val) +#define TRACE_EVENT_COPY_ASYNC_END2(category_group, name, id, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val) + +// Similar to TRACE_EVENT_ASYNC_ENDx but with a custom |at| timestamp provided. +#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \ + timestamp) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \ + TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP1(category_group, name, id, \ + timestamp, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \ + TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \ + arg1_name, arg1_val) +#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2(category_group, name, id, \ + timestamp, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \ + TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \ + arg1_name, arg1_val, arg2_name, arg2_val) + +// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can +// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC +// events. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. +// - A pair of NESTABLE_ASYNC_BEGIN event and NESTABLE_ASYNC_END event is +// considered as a match if their category_group, name and id all match. +// - |id| must either be a pointer or an integer value up to 64 bits. +// If it's a pointer, the bits will be xored with a hash of the process ID so +// that the same pointer on two different processes will not collide. +// - |id| is used to match a child NESTABLE_ASYNC event with its parent +// NESTABLE_ASYNC event. Therefore, events in the same nested event tree must +// be logged using the same id and category_group. 
+// +// Unmatched NESTABLE_ASYNC_END event will be parsed as an event that starts +// at the first NESTABLE_ASYNC event of that id, and unmatched +// NESTABLE_ASYNC_BEGIN event will be parsed as an event that ends at the last +// NESTABLE_ASYNC event of that id. Corresponding warning messages for +// unmatched events will be shown in the analysis view. + +// Records a single NESTABLE_ASYNC_BEGIN event called "name" immediately, with +// 0, 1 or 2 associated arguments. If the category is not enabled, then this +// does nothing. +#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(category_group, name, id, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val) +#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(category_group, name, id, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val) +// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 0 +// or 2 associated arguments. If the category is not enabled, then this does +// nothing. +#define TRACE_EVENT_NESTABLE_ASYNC_END0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE) +// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 1 +// associated argument. If the category is not enabled, then this does nothing. +#define TRACE_EVENT_NESTABLE_ASYNC_END1(category_group, name, id, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val) +#define TRACE_EVENT_NESTABLE_ASYNC_END2(category_group, name, id, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val) + +// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately, +// with one associated argument. If the category is not enabled, then this +// does nothing. +#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(category_group, name, id, \ + arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val) +// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately, +// with 2 associated arguments. If the category is not enabled, then this +// does nothing. 
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \
+    category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TTS2( \
+    category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+      TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+      arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TTS2( \
+    category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+      TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+      arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_NESTABLE_ASYNC_{BEGIN,END}x but with a custom
+// |timestamp| provided.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, \
+                                                         id, timestamp) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(category_group, name, \
+                                                       id, timestamp) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0( \
+    category_group, name, id, timestamp) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TIMESTAMP0( \
+    category_group, name, id, timestamp) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
+// events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// FLOW events are different from ASYNC events in how they are drawn by the
+// tracing UI.
A FLOW defines asynchronous data flow, such as posting a task +// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be +// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar +// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined +// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP +// macros. When the operation completes, call FLOW_END. An async operation can +// span threads and processes, but all events in that operation must use the +// same |name| and |id|. Each event can have its own args. +#define TRACE_EVENT_FLOW_BEGIN0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_FLOW_BEGIN1(category_group, name, id, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val) +#define TRACE_EVENT_FLOW_BEGIN2(category_group, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val) +#define TRACE_EVENT_COPY_FLOW_BEGIN0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_COPY) +#define TRACE_EVENT_COPY_FLOW_BEGIN1(category_group, name, id, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val) +#define TRACE_EVENT_COPY_FLOW_BEGIN2(category_group, name, id, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val) + +// Records a single FLOW_STEP event for |step| immediately. If the category +// is not enabled, then this does nothing. The |name| and |id| must match the +// FLOW_BEGIN event above. The |step| param identifies this step within the +// async event. This should be called at the beginning of the next phase of an +// asynchronous operation. +#define TRACE_EVENT_FLOW_STEP0(category_group, name, id, step) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, "step", step) +#define TRACE_EVENT_FLOW_STEP1(category_group, name, id, step, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id, \ + TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val) +#define TRACE_EVENT_COPY_FLOW_STEP0(category_group, name, id, step) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_COPY, "step", step) +#define TRACE_EVENT_COPY_FLOW_STEP1(category_group, name, id, step, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id, \ + TRACE_EVENT_FLAG_COPY, "step", step, arg1_name, arg1_val) + +// Records a single FLOW_END event for "name" immediately. If the category +// is not enabled, then this does nothing. 
+#define TRACE_EVENT_FLOW_END0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \ + name, id, TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \ + name, id, \ + TRACE_EVENT_FLAG_BIND_TO_ENCLOSING) +#define TRACE_EVENT_FLOW_END1(category_group, name, id, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \ + name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \ + arg1_val) +#define TRACE_EVENT_FLOW_END2(category_group, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \ + name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \ + arg1_val, arg2_name, arg2_val) +#define TRACE_EVENT_COPY_FLOW_END0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \ + name, id, TRACE_EVENT_FLAG_COPY) +#define TRACE_EVENT_COPY_FLOW_END1(category_group, name, id, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \ + name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \ + arg1_val) +#define TRACE_EVENT_COPY_FLOW_END2(category_group, name, id, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \ + name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \ + arg1_val, arg2_name, arg2_val) + +// Special trace event macro to trace task execution with the location where it +// was posted from. +#define TRACE_TASK_EXECUTION(run_function, task) \ + INTERNAL_TRACE_TASK_EXECUTION(run_function, task) + +// TRACE_EVENT_METADATA* events are information related to other +// injected events, not events in their own right. +#define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, arg1_name, arg1_val) + +// Records a clock sync event. +#define TRACE_EVENT_CLOCK_SYNC_RECEIVER(sync_id) \ + INTERNAL_TRACE_EVENT_ADD( \ + TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", \ + TRACE_EVENT_FLAG_NONE, "sync_id", sync_id) +#define TRACE_EVENT_CLOCK_SYNC_ISSUER(sync_id, issue_ts, issue_end_ts) \ + INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \ + TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", \ + issue_end_ts.ToInternalValue(), TRACE_EVENT_FLAG_NONE, \ + "sync_id", sync_id, "issue_ts", issue_ts.ToInternalValue()) + +// Macros to track the life time and value of arbitrary client objects. +// See also TraceTrackableObject. 
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, \ + TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE) + +#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \ + snapshot) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \ + TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE, "snapshot", snapshot) + +#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \ + category_group, name, id, timestamp, snapshot) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \ + TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \ + TRACE_EVENT_FLAG_NONE, "snapshot", snapshot) + +#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, \ + TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE) + +// Records entering and leaving trace event contexts. |category_group| and +// |name| specify the context category and type. |context| is a +// snapshotted context object id. +#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, \ + TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, \ + TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \ + INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, \ + TRACE_ID_DONT_MANGLE(context)) + +// Macro to specify that two trace IDs are identical. For example, +// TRACE_BIND_IDS( +// "category", "name", +// TRACE_ID_WITH_SCOPE("net::URLRequest", 0x1000), +// TRACE_ID_WITH_SCOPE("blink::ResourceFetcher::FetchRequest", 0x2000)) +// tells the trace consumer that events with ID ("net::URLRequest", 0x1000) from +// the current process have the same ID as events with ID +// ("blink::ResourceFetcher::FetchRequest", 0x2000). +#define TRACE_BIND_IDS(category_group, name, id, bind_id) \ + INTERNAL_TRACE_EVENT_ADD_BIND_IDS(category_group, name, id, bind_id); + +// Macro to efficiently determine if a given category group is enabled. +#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \ + do { \ + INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ + if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ + *ret = true; \ + } else { \ + *ret = false; \ + } \ + } while (0) + +// Macro to explicitly warm up a given category group. This could be useful in +// cases where we want to initialize a category group before any trace events +// for that category group is reported. For example, to have a category group +// always show up in the "record categories" list for manually selecting +// settings in about://tracing. +#define TRACE_EVENT_WARMUP_CATEGORY(category_group) \ + INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) + +// Macro to efficiently determine, through polling, if a new trace has begun. 
+#define TRACE_EVENT_IS_NEW_TRACE(ret) \
+  do { \
+    static int INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = 0; \
+    int num_traces_recorded = TRACE_EVENT_API_GET_NUM_TRACES_RECORDED(); \
+    if (num_traces_recorded != -1 && \
+        num_traces_recorded != \
+            INTERNAL_TRACE_EVENT_UID(lastRecordingNumber)) { \
+      INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = num_traces_recorded; \
+      *ret = true; \
+    } else { \
+      *ret = false; \
+    } \
+  } while (0)
+
+// Notes regarding the following definitions:
+// New values can be added and propagated to third party libraries, but existing
+// definitions must never be changed, because third party libraries may use old
+// definitions.
+
+// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
+#define TRACE_EVENT_PHASE_BEGIN ('B')
+#define TRACE_EVENT_PHASE_END ('E')
+#define TRACE_EVENT_PHASE_COMPLETE ('X')
+#define TRACE_EVENT_PHASE_INSTANT ('I')
+#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_INTO ('T')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_PAST ('p')
+#define TRACE_EVENT_PHASE_ASYNC_END ('F')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN ('b')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_END ('e')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT ('n')
+#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
+#define TRACE_EVENT_PHASE_FLOW_STEP ('t')
+#define TRACE_EVENT_PHASE_FLOW_END ('f')
+#define TRACE_EVENT_PHASE_METADATA ('M')
+#define TRACE_EVENT_PHASE_COUNTER ('C')
+#define TRACE_EVENT_PHASE_SAMPLE ('P')
+#define TRACE_EVENT_PHASE_CREATE_OBJECT ('N')
+#define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O')
+#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')
+#define TRACE_EVENT_PHASE_MEMORY_DUMP ('v')
+#define TRACE_EVENT_PHASE_MARK ('R')
+#define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
+#define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
+#define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
+#define TRACE_EVENT_PHASE_BIND_IDS ('=')
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
+#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned int>(1 << 0))
+#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned int>(1 << 1))
+#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned int>(1 << 2))
+#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned int>(1 << 3))
+#define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast<unsigned int>(1 << 4))
+#define TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP (static_cast<unsigned int>(1 << 5))
+#define TRACE_EVENT_FLAG_ASYNC_TTS (static_cast<unsigned int>(1 << 6))
+#define TRACE_EVENT_FLAG_BIND_TO_ENCLOSING (static_cast<unsigned int>(1 << 7))
+#define TRACE_EVENT_FLAG_FLOW_IN (static_cast<unsigned int>(1 << 8))
+#define TRACE_EVENT_FLAG_FLOW_OUT (static_cast<unsigned int>(1 << 9))
+#define TRACE_EVENT_FLAG_HAS_CONTEXT_ID (static_cast<unsigned int>(1 << 10))
+#define TRACE_EVENT_FLAG_HAS_PROCESS_ID (static_cast<unsigned int>(1 << 11))
+
+#define TRACE_EVENT_FLAG_SCOPE_MASK \
+  (static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
+                             TRACE_EVENT_FLAG_SCOPE_EXTRA))
+
+// Type values for identifying types in the TraceValue union.
+#define TRACE_VALUE_TYPE_BOOL (static_cast<unsigned char>(1))
+#define TRACE_VALUE_TYPE_UINT (static_cast<unsigned char>(2))
+#define TRACE_VALUE_TYPE_INT (static_cast<unsigned char>(3))
+#define TRACE_VALUE_TYPE_DOUBLE (static_cast<unsigned char>(4))
+#define TRACE_VALUE_TYPE_POINTER (static_cast<unsigned char>(5))
+#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
+#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
+#define TRACE_VALUE_TYPE_CONVERTABLE (static_cast<unsigned char>(8))
+
+// Enum reflecting the scope of an INSTANT event. Must fit within
+// TRACE_EVENT_FLAG_SCOPE_MASK.
+#define TRACE_EVENT_SCOPE_GLOBAL (static_cast<unsigned char>(0 << 3))
+#define TRACE_EVENT_SCOPE_PROCESS (static_cast<unsigned char>(1 << 3))
+#define TRACE_EVENT_SCOPE_THREAD (static_cast<unsigned char>(2 << 3))
+
+#define TRACE_EVENT_SCOPE_NAME_GLOBAL ('g')
+#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
+#define TRACE_EVENT_SCOPE_NAME_THREAD ('t')
+
 // The pointer returned from GetCategoryGroupEnabled() points to a
 // value with zero or more of the following bits. Used in this class only.

From d949bfa5abb7baaa12ea7fd0f4b03968946272b2 Mon Sep 17 00:00:00 2001
From: Matt Loring
Date: Thu, 27 Oct 2016 14:44:25 -0700
Subject: [PATCH 07/22] Add end to end test and documentation

---
 doc/api/debugger.md               | 32 +++++++++++++++++++++++++++++++
 test/parallel/test-trace-event.js | 29 ++++++++++++++++++++++++++++
 2 files changed, 61 insertions(+)
 create mode 100644 test/parallel/test-trace-event.js

diff --git a/doc/api/debugger.md b/doc/api/debugger.md
index c8a61d2ce742f8..cc76d0d32f858e 100644
--- a/doc/api/debugger.md
+++ b/doc/api/debugger.md
@@ -202,3 +202,35 @@ To start debugging, open the following URL in Chrome:
 ```
 
 [TCP-based protocol]: https://github.com/v8/v8/wiki/Debugging-Protocol
+
+## Trace Event Support for Node.js
+
+Trace Event provides a mechanism to centralize tracing information generated by
+V8, Node core, and userspace code.
+
+Tracing can be enabled by passing the `--enable-tracing` flag when starting a
+Node.js application.
+
+Tracing can be configured by additionally specifying a configuration file using the
+`--trace-config` flag:
+
+```txt
+node --enable-tracing --trace-config=trace-config.json server.js
+```
+
+where `trace-config.json` may override any of the default configuration values:
+
+```javascript
+{
+  "record_mode": "record-continuously",
+  "enable_sampling": true,
+  "enable_systrace": true,
+  "enable_argument_filter": true,
+  "included_categories": [ "v8", "node" ],
+  "excluded_categories": [ ]
+}
+```
+
+Running Node.js with tracing enabled will produce log files that can be opened
+in the [`chrome://tracing`](https://www.chromium.org/developers/how-tos/trace-event-profiling-tool)
+tab of Chrome.
diff --git a/test/parallel/test-trace-event.js b/test/parallel/test-trace-event.js
new file mode 100644
index 00000000000000..ffd4fb26081e36
--- /dev/null
+++ b/test/parallel/test-trace-event.js
@@ -0,0 +1,29 @@
+'use strict';
+
+const assert = require('assert');
+const fs = require('fs');
+const cp = require('child_process');
+
+const CODE = 'for (var i = 0; i < 100000; i++) { \"test\" + i }';
+const FILE_NAME = 'node_trace.1.log';
+
+fs.access(FILE_NAME, (err) => {
+  if (!err) {
+    fs.unlinkSync(FILE_NAME);
+  }
+
+  const proc = cp.spawn(process.execPath,
+                        [ '--enable-tracing', '-e', CODE ]);
+
+  proc.once('exit', () => {
+    fs.readFile(FILE_NAME, (err, data) => {
+      fs.unlinkSync(FILE_NAME);
+      const traces = JSON.parse(data).traceEvents;
+      assert(traces.length > 0);
+      // Values that should be present in all outputs to approximate well-formedness.
+ assert(traces.some((trace) => { return trace.pid === proc.pid; })); + assert(traces.some((trace) => { return trace.cat === 'v8'; })); + assert(traces.some((trace) => { return trace.name === 'V8.ScriptCompiler'; })); + }); + }); +}); From f0f44bbc5244230d17414c4d90c8eb49f77f37ab Mon Sep 17 00:00:00 2001 From: Matt Loring Date: Thu, 27 Oct 2016 15:24:00 -0700 Subject: [PATCH 08/22] Clean up end to end test --- test/parallel/test-trace-event.js | 35 +++++++++++++++---------------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/test/parallel/test-trace-event.js b/test/parallel/test-trace-event.js index ffd4fb26081e36..4d4b475c7bfb22 100644 --- a/test/parallel/test-trace-event.js +++ b/test/parallel/test-trace-event.js @@ -1,29 +1,28 @@ 'use strict'; const assert = require('assert'); -const fs = require('fs'); const cp = require('child_process'); +const common = require('../common.js'); +const fs = require('fs'); const CODE = 'for (var i = 0; i < 100000; i++) { \"test\" + i }'; const FILE_NAME = 'node_trace.1.log'; -fs.access(FILE_NAME, (err) => { - if (!err) { - fs.unlinkSync(FILE_NAME); - } +common.refreshTmpDir(); +process.chdir(common.tmpDir); - const proc = cp.spawn(process.execPath, - [ '--enable-tracing', '-e', CODE ]); +const proc = cp.spawn(process.execPath, + [ '--enable-tracing', '-e', CODE ]); - proc.once('exit', () => { - fs.readFile(FILE_NAME, (err, data) => { - fs.unlinkSync(FILE_NAME); - const traces = JSON.parse(data).traceEvents; - assert(traces.length > 0); - // Values that should be present in all outputs to approximate well-formedness. - assert(traces.some((trace) => { return trace.pid === proc.pid; })); - assert(traces.some((trace) => { return trace.cat === 'v8'; })); - assert(traces.some((trace) => { return trace.name === 'V8.ScriptCompiler'; })); - }); +proc.once('exit', common.mustCall(() => { + assert(common.fileExists(FILE_NAME)); + fs.readFile(FILE_NAME, (err, data) => { + fs.unlinkSync(FILE_NAME); + const traces = JSON.parse(data).traceEvents; + assert(traces.length > 0); + // Values that should be present in all outputs to approximate well-formedness. + assert(traces.some((trace) => { return trace.pid === proc.pid; })); + assert(traces.some((trace) => { return trace.cat === 'v8'; })); + assert(traces.some((trace) => { return trace.name === 'V8.ScriptCompiler'; })); }); -}); +})); From a280056b18627790c8421b9ad558643f6e3ba3ab Mon Sep 17 00:00:00 2001 From: Matt Loring Date: Fri, 28 Oct 2016 09:59:49 -0700 Subject: [PATCH 09/22] More test cleanup --- test/parallel/test-trace-event.js | 1 - 1 file changed, 1 deletion(-) diff --git a/test/parallel/test-trace-event.js b/test/parallel/test-trace-event.js index 4d4b475c7bfb22..f3274405078a53 100644 --- a/test/parallel/test-trace-event.js +++ b/test/parallel/test-trace-event.js @@ -17,7 +17,6 @@ const proc = cp.spawn(process.execPath, proc.once('exit', common.mustCall(() => { assert(common.fileExists(FILE_NAME)); fs.readFile(FILE_NAME, (err, data) => { - fs.unlinkSync(FILE_NAME); const traces = JSON.parse(data).traceEvents; assert(traces.length > 0); // Values that should be present in all outputs to approximate well-formedness. 
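For orientation before the next patch: this is roughly how the macros from
`src/tracing/trace_event.h` above are meant to be used from Node's C++
internals once the agent is running. A minimal sketch, assuming tracing was
started with `--enable-tracing`; the function, event, and argument names here
are illustrative and not taken from the patches themselves:

```cpp
#include "tracing/trace_event.h"

// Hypothetical instrumentation of a worker function. "node" is one of the
// category groups enabled by the default TraceConfig; "DoWork" and "n" are
// made-up names for this sketch.
static void DoWork(int n) {
  TRACE_EVENT_BEGIN1("node", "DoWork", "n", n);      // phase 'B'
  for (int i = 0; i < n; i++) {
    TRACE_COUNTER1("node", "WorkItemsDone", i + 1);  // phase 'C'
  }
  TRACE_EVENT_END0("node", "DoWork");                // phase 'E'
}
```

Each macro expansion first checks whether its category group is enabled, so
call sites in disabled categories should cost little more than a flag test.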
From f3eea4f34cd9a88aee1df986187676d02493dd8a Mon Sep 17 00:00:00 2001 From: Matt Loring Date: Mon, 31 Oct 2016 15:20:31 -0700 Subject: [PATCH 10/22] Move configuration to --enabled-categories command line flag --- doc/api/debugger.md | 20 +---- node.gyp | 1 - src/node.cc | 8 +- src/tracing/agent.cc | 21 ++++-- src/tracing/agent.h | 2 +- src/tracing/node_trace_writer.cc | 4 +- src/tracing/trace_config_parser.cc | 116 ----------------------------- src/tracing/trace_config_parser.h | 43 ----------- 8 files changed, 24 insertions(+), 191 deletions(-) delete mode 100644 src/tracing/trace_config_parser.cc delete mode 100644 src/tracing/trace_config_parser.h diff --git a/doc/api/debugger.md b/doc/api/debugger.md index cc76d0d32f858e..3c769d280b68c4 100644 --- a/doc/api/debugger.md +++ b/doc/api/debugger.md @@ -211,24 +211,12 @@ V8, Node core, and userspace code. Tracing can be enabled by passing the `--enable-tracing` flag when starting a Node.js application. -Tracing can be configured by additionally specifying a configuration file using the -`--trace-config` flag: +The set of categories for which traces are recorded can be specified using the +`--enabled-categories` flag followed by a list of comma separated category names. +By default the `node` and `v8` categories are enabled. ```txt -node --enable-trace --trace-config=trace-config.json server.js -``` - -where `trace-config.json` may override any of the default configuration values: - -```javascript -{ - "record_mode": "record-continuously", - "enable_sampling": true, - "enable_systrace": true, - "enable_argument_filter": true, - "included_categories": [ "v8", "node" ], - "excluded_categories": [ ] -} +node --enable-tracing --enabled-categories=v8,node server.js ``` Running Node.js with tracing enabled will produce log files that can be opened diff --git a/node.gyp b/node.gyp index 94ed64604f3d1c..63aedd7a027dcf 100644 --- a/node.gyp +++ b/node.gyp @@ -143,7 +143,6 @@ 'src/tracing/agent.cc', 'src/tracing/node_trace_buffer.cc', 'src/tracing/node_trace_writer.cc', - 'src/tracing/trace_config_parser.cc', 'src/tracing/trace_event.cc', 'src/debug-agent.cc', 'src/async-wrap.cc', diff --git a/src/node.cc b/src/node.cc index ad1453d462bab7..e9fa44af913f17 100644 --- a/src/node.cc +++ b/src/node.cc @@ -151,7 +151,7 @@ static node_module* modlist_builtin; static node_module* modlist_linked; static node_module* modlist_addon; static bool trace_enabled = false; -static const char* trace_config_file = nullptr; +static const char* trace_enabled_categories = nullptr; #if defined(NODE_HAVE_I18N_SUPPORT) // Path to ICU data (for i18n / Intl) @@ -3632,8 +3632,8 @@ static void ParseArgs(int* argc, trace_sync_io = true; } else if (strcmp(arg, "--enable-tracing") == 0) { trace_enabled = true; - } else if (strncmp(arg, "--trace-config=", 15) == 0) { - trace_config_file = arg + 15; + } else if (strncmp(arg, "--enabled-categories=", 21) == 0) { + trace_enabled_categories = arg + 21; } else if (strcmp(arg, "--track-heap-objects") == 0) { track_heap_objects = true; } else if (strcmp(arg, "--throw-deprecation") == 0) { @@ -4342,7 +4342,7 @@ inline int Start(Isolate* isolate, IsolateData* isolate_data, // Enable tracing when argv has --enable-tracing. 
   if (trace_enabled) {
-    env.tracing_agent()->Start(v8_platform.platform_, trace_config_file);
+    env.tracing_agent()->Start(v8_platform.platform_, trace_enabled_categories);
   }
 
   {
diff --git a/src/tracing/agent.cc b/src/tracing/agent.cc
index 8fdd23f6ce387c..62d7e9076ea961 100644
--- a/src/tracing/agent.cc
+++ b/src/tracing/agent.cc
@@ -2,14 +2,15 @@
 
 #include "env-inl.h"
 #include "libplatform/libplatform.h"
-#include "tracing/trace_config_parser.h"
 
 namespace node {
 namespace tracing {
 
+using v8::platform::tracing::TraceConfig;
+
 Agent::Agent(Environment* env) : parent_env_(env) {}
 
-void Agent::Start(v8::Platform* platform, const char* trace_config_file) {
+void Agent::Start(v8::Platform* platform, const char* enabled_categories) {
   auto env = parent_env_;
   platform_ = platform;
 
@@ -23,12 +24,16 @@ void Agent::Start(v8::Platform* platform, const char* trace_config_file) {
   tracing_controller_ = new TracingController();
 
   TraceConfig* trace_config = new TraceConfig();
-  if (trace_config_file) {
-    std::ifstream fin(trace_config_file);
-    std::string str((std::istreambuf_iterator<char>(fin)),
-                    std::istreambuf_iterator<char>());
-    TraceConfigParser::FillTraceConfig(env->isolate(), trace_config,
-                                       str.c_str());
+  if (enabled_categories) {
+    int num_chars = strlen(enabled_categories);
+    char category_list[num_chars + 1];
+    strncpy(category_list, enabled_categories, num_chars);
+    category_list[num_chars] = '\0';
+    char* category = strtok(category_list, ",");
+    while (category != NULL) {
+      trace_config->AddIncludedCategory(category);
+      category = strtok(NULL, ",");
+    }
   } else {
     trace_config->AddIncludedCategory("v8");
     trace_config->AddIncludedCategory("node");
diff --git a/src/tracing/agent.h b/src/tracing/agent.h
index 34418cb93f7fa9..5f5e5c82805bfd 100644
--- a/src/tracing/agent.h
+++ b/src/tracing/agent.h
@@ -17,7 +17,7 @@ namespace tracing {
 class Agent {
  public:
   explicit Agent(Environment* env);
-  void Start(v8::Platform* platform, const char* trace_config_file);
+  void Start(v8::Platform* platform, const char* enabled_categories);
   void Stop();
 
  private:
diff --git a/src/tracing/node_trace_writer.cc b/src/tracing/node_trace_writer.cc
index 08be67235b9c67..4bfd46e466b0de 100644
--- a/src/tracing/node_trace_writer.cc
+++ b/src/tracing/node_trace_writer.cc
@@ -27,7 +27,7 @@ void NodeTraceWriter::WriteSuffix() {
     Mutex::ScopedLock scoped_lock(stream_mutex_);
     if (total_traces_ > 0) {
       total_traces_ = 0;  // so we don't write it again in FlushPrivate
-      // Appends "]}\n" to stream_.
+      // Appends "]}" to stream_.
       delete json_trace_writer_;
       should_flush = true;
     }
@@ -90,7 +90,7 @@ void NodeTraceWriter::FlushPrivate() {
     Mutex::ScopedLock stream_scoped_lock(stream_mutex_);
     if (total_traces_ >= kTracesPerFile) {
       total_traces_ = 0;
-      // Destroying the member JSONTraceWriter object appends "]"\n"" to
+      // Destroying the member JSONTraceWriter object appends "]}" to
       // stream_ - in other words, ending a JSON file.
       delete json_trace_writer_;
     }
diff --git a/src/tracing/trace_config_parser.cc b/src/tracing/trace_config_parser.cc
deleted file mode 100644
index c18b17a0bb4955..00000000000000
--- a/src/tracing/trace_config_parser.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-#include "tracing/trace_config_parser.h"
-
-#include
-
-namespace node {
-namespace tracing {
-
-using v8::Array;
-using v8::Boolean;
-using v8::HandleScope;
-using v8::JSON;
-using v8::NewStringType;
-using v8::String;
-
-// String options that can be used to initialize TraceOptions.
-static const char* kRecordUntilFull = "record-until-full";
-static const char* kRecordContinuously = "record-continuously";
-static const char* kRecordAsMuchAsPossible = "record-as-much-as-possible";
-
-static const char* kRecordModeParam = "record_mode";
-static const char* kEnableSamplingParam = "enable_sampling";
-static const char* kEnableSystraceParam = "enable_systrace";
-static const char* kEnableArgumentFilterParam = "enable_argument_filter";
-static const char* kIncludedCategoriesParam = "included_categories";
-static const char* kExcludedCategoriesParam = "excluded_categories";
-
-void TraceConfigParser::FillTraceConfig(
-    Isolate* isolate, TraceConfig* trace_config, const char* json_str) {
-  HandleScope handle_scope(isolate);
-  Local<Context> context = Context::New(isolate);
-  Context::Scope context_scope(context);
-
-  Local<String> source =
-      String::NewFromUtf8(isolate, json_str, NewStringType::kNormal)
-          .ToLocalChecked();
-  Local<Value> result = JSON::Parse(context, source).ToLocalChecked();
-  Local<Object> trace_config_object = Local<Object>::Cast(result);
-
-  trace_config->SetTraceRecordMode(
-      GetTraceRecordMode(isolate, context, trace_config_object));
-  if (GetBoolean(isolate, context, trace_config_object, kEnableSamplingParam))
-    trace_config->EnableSampling();
-  if (GetBoolean(isolate, context, trace_config_object, kEnableSystraceParam))
-    trace_config->EnableSystrace();
-  if (GetBoolean(
-      isolate, context, trace_config_object, kEnableArgumentFilterParam))
-    trace_config->EnableArgumentFilter();
-  UpdateCategoriesList(isolate, context, trace_config_object,
-                       kIncludedCategoriesParam, trace_config);
-  UpdateCategoriesList(isolate, context, trace_config_object,
-                       kExcludedCategoriesParam, trace_config);
-}
-
-bool TraceConfigParser::GetBoolean(Isolate* isolate, Local<Context> context,
-                                   Local<Object> object, const char* property) {
-  Local<Value> value = GetValue(isolate, context, object, property);
-  if (value->IsNumber()) {
-    Local<Boolean> v8_boolean = value->ToBoolean(context).ToLocalChecked();
-    return v8_boolean->Value();
-  }
-  return false;
-}
-
-int TraceConfigParser::UpdateCategoriesList(
-    Isolate* isolate, Local<Context> context, Local<Object> object,
-    const char* property, TraceConfig* trace_config) {
-  Local<Value> value = GetValue(isolate, context, object, property);
-  if (value->IsArray()) {
-    Local<Array> v8_array = Local<Array>::Cast(value);
-    for (int i = 0, length = v8_array->Length(); i < length; ++i) {
-      Local<String> v = v8_array->Get(context, i)
-                            .ToLocalChecked()
-                            ->ToString(context)
-                            .ToLocalChecked();
-      String::Utf8Value str(v->ToString(context).ToLocalChecked());
-      if (property == kIncludedCategoriesParam) {
-        trace_config->AddIncludedCategory(*str);
-      } else if (property == kExcludedCategoriesParam) {
-        trace_config->AddExcludedCategory(*str);
-      }
-    }
-    return v8_array->Length();
-  }
-  return 0;
-}
-
-TraceRecordMode TraceConfigParser::GetTraceRecordMode(Isolate* isolate,
                                                      Local<Context> context,
-                                                      Local<Object> object) {
-  Local<Value> value = GetValue(isolate, context, object, kRecordModeParam);
-  if (value->IsString()) {
-    Local<String> v8_string = value->ToString(context).ToLocalChecked();
-    String::Utf8Value str(v8_string);
-    if (strcmp(kRecordUntilFull, *str) == 0) {
-      return TraceRecordMode::RECORD_UNTIL_FULL;
-    } else if (strcmp(kRecordContinuously, *str) == 0) {
-      return TraceRecordMode::RECORD_CONTINUOUSLY;
-    } else if (strcmp(kRecordAsMuchAsPossible, *str) == 0) {
-      return TraceRecordMode::RECORD_AS_MUCH_AS_POSSIBLE;
-    }
-  }
-  return TraceRecordMode::RECORD_UNTIL_FULL;
-}
-
-Local<Value> TraceConfigParser::GetValue(Isolate* isolate,
-                                         Local<Context> context,
-                                         Local<Object> object,
-                                         const char* property) {
-  Local<String> v8_str =
-      String::NewFromUtf8(isolate, property, NewStringType::kNormal)
-          .ToLocalChecked();
-  return object->Get(context, v8_str).ToLocalChecked();
-}
-
-}  // namespace tracing
-}  // namespace node
diff --git a/src/tracing/trace_config_parser.h b/src/tracing/trace_config_parser.h
deleted file mode 100644
index 04bf070dde7e36..00000000000000
--- a/src/tracing/trace_config_parser.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef SRC_TRACE_CONFIG_PARSER_H_
-#define SRC_TRACE_CONFIG_PARSER_H_
-
-#include "libplatform/v8-tracing.h"
-#include "v8.h"
-
-namespace node {
-namespace tracing {
-
-using v8::platform::tracing::TraceConfig;
-using v8::platform::tracing::TraceRecordMode;
-using v8::Context;
-using v8::Isolate;
-using v8::Local;
-using v8::Object;
-using v8::Value;
-
-class TraceConfigParser {
- public:
-  static void FillTraceConfig(Isolate* isolate, TraceConfig* trace_config,
-                              const char* json_str);
-
- private:
-  static bool GetBoolean(Isolate* isolate, Local<Context> context,
-                         Local<Object> object, const char* property);
-
-  static int UpdateCategoriesList(Isolate* isolate, Local<Context> context,
-                                  Local<Object> object,
-                                  const char* property,
-                                  TraceConfig* trace_config);
-
-  static TraceRecordMode GetTraceRecordMode(Isolate* isolate,
-                                            Local<Context> context,
-                                            Local<Object> object);
-
-  static Local<Value> GetValue(Isolate* isolate, Local<Context> context,
-                               Local<Object> object, const char* property);
-};
-
-}  // namespace tracing
-}  // namespace node
-
-#endif  // SRC_TRACE_CONFIG_PARSER_H_

From b8106f3997edcf0366cf351a2d09fa90e0a51024 Mon Sep 17 00:00:00 2001
From: Matt Loring
Date: Wed, 9 Nov 2016 16:25:06 -0800
Subject: [PATCH 11/22] Move common to top of test, add flags to --help

---
 src/node.cc                       | 3 +++
 test/parallel/test-trace-event.js | 3 +--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/node.cc b/src/node.cc
index e9fa44af913f17..aee891baa88410 100644
--- a/src/node.cc
+++ b/src/node.cc
@@ -3482,6 +3482,9 @@ static void PrintHelp() {
          "  --trace-warnings           show stack traces on process warnings\n"
          "  --trace-sync-io            show stack trace when use of sync IO\n"
          "                             is detected after the first tick\n"
+         "  --enable-tracing           track trace events\n"
+         "  --enabled-categories=      comma separated list of trace event\n"
+         "                             categories to record\n"
          "  --track-heap-objects       track heap object allocations for heap "
          "snapshots\n"
          "  --prof-process             process v8 profiler output generated\n"
diff --git a/test/parallel/test-trace-event.js b/test/parallel/test-trace-event.js
index f3274405078a53..d99ed13f114e0b 100644
--- a/test/parallel/test-trace-event.js
+++ b/test/parallel/test-trace-event.js
@@ -1,8 +1,7 @@
 'use strict';
-
+const common = require('../common.js');
 const assert = require('assert');
 const cp = require('child_process');
-const common = require('../common.js');
 const fs = require('fs');
 
 const CODE = 'for (var i = 0; i < 100000; i++) { \"test\" + i }';

From 0f96093191b40b553dea93bf6a8f3e4ec690864a Mon Sep 17 00:00:00 2001
From: Matt Loring
Date: Tue, 15 Nov 2016 14:17:19 -0800
Subject: [PATCH 12/22] Update cli docs

---
 doc/api/cli.md | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/doc/api/cli.md b/doc/api/cli.md
index d868d70533f5f2..8f4629c93b6fe4 100644
--- a/doc/api/cli.md
+++ b/doc/api/cli.md
@@ -129,6 +129,20 @@ added: v2.1.0
 Prints a stack trace whenever synchronous I/O is detected after the first turn
 of the event loop.
 
+### `--enable-tracing`
+
+
+Enables the collection of trace event tracing information.
+ +### `--enabled-categories=list` + + +A comma separated list of categories that should be traced when trace event +tracing is enabled using `--enable-tracing`. ### `--zero-fill-buffers` Enables the collection of trace event tracing information. -### `--enabled-categories=list` +### `--enabled-categories` A comma separated list of categories that should be traced when trace event diff --git a/src/node.cc b/src/node.cc index aee891baa88410..537bb4087f1dff 100644 --- a/src/node.cc +++ b/src/node.cc @@ -3483,7 +3483,7 @@ static void PrintHelp() { " --trace-sync-io show stack trace when use of sync IO\n" " is detected after the first tick\n" " --enable-tracing track trace events\n" - " --enabled-categories= comma separated list of trace event\n" + " --enabled-categories comma separated list of trace event\n" " categories to record\n" " --track-heap-objects track heap object allocations for heap " "snapshots\n" @@ -3635,8 +3635,14 @@ static void ParseArgs(int* argc, trace_sync_io = true; } else if (strcmp(arg, "--enable-tracing") == 0) { trace_enabled = true; - } else if (strncmp(arg, "--enabled-categories=", 21) == 0) { - trace_enabled_categories = arg + 21; + } else if (strcmp(arg, "--enabled-categories") == 0) { + const char* categories = argv[index + 1]; + if (categories == nullptr) { + fprintf(stderr, "%s: %s requires an argument\n", argv[0], arg); + exit(9); + } + args_consumed += 1; + trace_enabled_categories = categories; } else if (strcmp(arg, "--track-heap-objects") == 0) { track_heap_objects = true; } else if (strcmp(arg, "--throw-deprecation") == 0) { diff --git a/test/parallel/test-trace-event.js b/test/parallel/test-trace-event.js index d99ed13f114e0b..1f39b12e649b80 100644 --- a/test/parallel/test-trace-event.js +++ b/test/parallel/test-trace-event.js @@ -1,10 +1,10 @@ 'use strict'; -const common = require('../common.js'); +const common = require('../common'); const assert = require('assert'); const cp = require('child_process'); const fs = require('fs'); -const CODE = 'for (var i = 0; i < 100000; i++) { \"test\" + i }'; +const CODE = 'for (var i = 0; i < 100000; i++) { "test" + i }'; const FILE_NAME = 'node_trace.1.log'; common.refreshTmpDir(); @@ -18,9 +18,11 @@ proc.once('exit', common.mustCall(() => { fs.readFile(FILE_NAME, (err, data) => { const traces = JSON.parse(data).traceEvents; assert(traces.length > 0); - // Values that should be present in all outputs to approximate well-formedness. + // Values that should be present on all runs to approximate correctness. 
   assert(traces.some((trace) => { return trace.pid === proc.pid; }));
   assert(traces.some((trace) => { return trace.cat === 'v8'; }));
-  assert(traces.some((trace) => { return trace.name === 'V8.ScriptCompiler'; }));
+  assert(traces.some((trace) => {
+    return trace.name === 'V8.ScriptCompiler';
+  }));
   });
 }));

From fc0984723a01cd1dd887299e111c2cce79a7422a Mon Sep 17 00:00:00 2001
From: Matt Loring
Date: Fri, 18 Nov 2016 11:27:31 -0800
Subject: [PATCH 14/22] Fix windows build

---
 src/tracing/agent.cc              | 16 ++++++++--------
 src/tracing/node_trace_buffer.cc  |  1 +
 src/tracing/node_trace_writer.cc  |  1 +
 test/parallel/test-trace-event.js |  2 +-
 4 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/src/tracing/agent.cc b/src/tracing/agent.cc
index 62d7e9076ea961..1b07eae9f3ced7 100644
--- a/src/tracing/agent.cc
+++ b/src/tracing/agent.cc
@@ -1,5 +1,8 @@
 #include "tracing/agent.h"
 
+#include <sstream>
+#include <string>
+
 #include "env-inl.h"
 #include "libplatform/libplatform.h"
 
@@ -25,14 +28,11 @@ void Agent::Start(v8::Platform* platform, const char* enabled_categories) {
 
   TraceConfig* trace_config = new TraceConfig();
   if (enabled_categories) {
-    int num_chars = strlen(enabled_categories);
-    char category_list[num_chars + 1];
-    strncpy(category_list, enabled_categories, num_chars);
-    category_list[num_chars] = '\0';
-    char* category = strtok(category_list, ",");
-    while (category != NULL) {
-      trace_config->AddIncludedCategory(category);
-      category = strtok(NULL, ",");
+    std::stringstream category_list(enabled_categories);
+    while (category_list.good()) {
+      std::string category;
+      getline(category_list, category, ',');
+      trace_config->AddIncludedCategory(category.c_str());
     }
   } else {
     trace_config->AddIncludedCategory("v8");
diff --git a/src/tracing/node_trace_buffer.cc b/src/tracing/node_trace_buffer.cc
index 640d437f7f5c61..3c4bc46fe1c476 100644
--- a/src/tracing/node_trace_buffer.cc
+++ b/src/tracing/node_trace_buffer.cc
@@ -129,6 +129,7 @@ TraceObject* NodeTraceBuffer::GetEventByHandle(uint64_t handle) {
 bool NodeTraceBuffer::Flush() {
   buffer1_.Flush(true);
   buffer2_.Flush(true);
+  return true;
 }
 
 // Attempts to set current_buf_ such that it references a buffer that can
diff --git a/src/tracing/node_trace_writer.cc b/src/tracing/node_trace_writer.cc
index 4bfd46e466b0de..c3a17997d7af2b 100644
--- a/src/tracing/node_trace_writer.cc
+++ b/src/tracing/node_trace_writer.cc
@@ -1,6 +1,7 @@
 #include "tracing/node_trace_writer.h"
 
 #include <string.h>
+#include <fcntl.h>
 
 #include "util.h"
diff --git a/test/parallel/test-trace-event.js b/test/parallel/test-trace-event.js
index 1f39b12e649b80..4bb7c6f3452c92 100644
--- a/test/parallel/test-trace-event.js
+++ b/test/parallel/test-trace-event.js
@@ -16,7 +16,7 @@ const proc = cp.spawn(process.execPath,
 proc.once('exit', common.mustCall(() => {
   assert(common.fileExists(FILE_NAME));
   fs.readFile(FILE_NAME, (err, data) => {
-    const traces = JSON.parse(data).traceEvents;
+    const traces = JSON.parse(data.toString()).traceEvents;
     assert(traces.length > 0);
     // Values that should be present on all runs to approximate correctness.
From 1057bf2761ec36f565e8dc0e5f4b9db568f3238e Mon Sep 17 00:00:00 2001
From: Matt Loring
Date: Fri, 18 Nov 2016 13:40:24 -0800
Subject: [PATCH 15/22] Avoid flushing file if no traces were recorded

---
 src/tracing/node_trace_writer.cc | 3 +++
 src/tracing/node_trace_writer.h  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/tracing/node_trace_writer.cc b/src/tracing/node_trace_writer.cc
index c3a17997d7af2b..ac21e9f082bbb1 100644
--- a/src/tracing/node_trace_writer.cc
+++ b/src/tracing/node_trace_writer.cc
@@ -120,6 +120,9 @@ void NodeTraceWriter::Flush() {

 void NodeTraceWriter::Flush(bool blocking) {
   Mutex::ScopedLock scoped_lock(request_mutex_);
+  if (!json_trace_writer_) {
+    return;
+  }
   int request_id = ++num_write_requests_;
   int err = uv_async_send(&flush_signal_);
   CHECK_EQ(err, 0);
diff --git a/src/tracing/node_trace_writer.h b/src/tracing/node_trace_writer.h
index 988ed2bb7e7f64..0e1651ee5cbf0e 100644
--- a/src/tracing/node_trace_writer.h
+++ b/src/tracing/node_trace_writer.h
@@ -63,7 +63,7 @@ class NodeTraceWriter : public TraceWriter {
   int total_traces_ = 0;
   int file_num_ = 0;
   std::ostringstream stream_;
-  TraceWriter* json_trace_writer_;
+  TraceWriter* json_trace_writer_ = nullptr;
   uv_pipe_t trace_file_pipe_;
   bool exited_ = false;
 };
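
The early return above only works because the companion header change gives json_trace_writer_ an in-class `= nullptr`: Flush() can now run before any trace event has created the writer and observe a well-defined null rather than an uninitialized pointer. A reduced sketch of the pattern, with stand-in types rather than the real NodeTraceWriter:

// Sketch only: lazy-writer guard; a bool stands in for json_trace_writer_.
#include <cstdio>

class TraceWriterSketch {
 public:
  void AppendTraceEvent() { has_writer_ = true; }  // first event creates the writer

  void Flush() {
    if (!has_writer_) return;  // nothing was ever recorded: skip file I/O
    printf("flushing trace file\n");
  }

 private:
  bool has_writer_ = false;  // mirrors TraceWriter* json_trace_writer_ = nullptr;
};

int main() {
  TraceWriterSketch writer;
  writer.Flush();             // safe no-op before any trace event
  writer.AppendTraceEvent();
  writer.Flush();             // now actually performs the write
}
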
From fdf3ca544d9ad8bb7a9546be235c8e578634766e Mon Sep 17 00:00:00 2001
From: Jason Ginchereau
Date: Mon, 21 Nov 2016 16:23:47 -0800
Subject: [PATCH 16/22] Fix trace file writes on Windows

---
 src/tracing/node_trace_writer.cc | 23 ++++++++++-------------
 src/tracing/node_trace_writer.h  |  8 ++++----
 2 files changed, 14 insertions(+), 17 deletions(-)

diff --git a/src/tracing/node_trace_writer.cc b/src/tracing/node_trace_writer.cc
index ac21e9f082bbb1..70ded1d25c4406 100644
--- a/src/tracing/node_trace_writer.cc
+++ b/src/tracing/node_trace_writer.cc
@@ -46,7 +46,6 @@ NodeTraceWriter::~NodeTraceWriter() {
     err = uv_fs_close(tracing_loop_, &req, fd_, nullptr);
     CHECK_EQ(err, 0);
     uv_fs_req_cleanup(&req);
-    uv_close(reinterpret_cast<uv_handle_t*>(&trace_file_pipe_), nullptr);
   }
   uv_async_send(&exit_signal_);
   Mutex::ScopedLock scoped_lock(request_mutex_);
@@ -63,8 +62,6 @@ void NodeTraceWriter::OpenNewFileForStreaming() {
                  O_CREAT | O_WRONLY | O_TRUNC, 0644, NULL);
   CHECK_NE(fd_, -1);
   uv_fs_req_cleanup(&req);
-  uv_pipe_init(tracing_loop_, &trace_file_pipe_, 0);
-  uv_pipe_open(&trace_file_pipe_, fd_);
 }

 void NodeTraceWriter::AppendTraceEvent(TraceObject* trace_event) {
@@ -104,7 +101,7 @@ void NodeTraceWriter::FlushPrivate() {
     Mutex::ScopedLock request_scoped_lock(request_mutex_);
     highest_request_id = num_write_requests_;
   }
-  WriteToFile(str, highest_request_id);
+  WriteToFile(std::move(str), highest_request_id);
 }

 void NodeTraceWriter::FlushSignalCb(uv_async_t* signal) {
@@ -136,12 +133,13 @@ void NodeTraceWriter::Flush(bool blocking) {
   }
 }

-void NodeTraceWriter::WriteToFile(std::string str, int highest_request_id) {
-  const char* c_str = str.c_str();
-  uv_buf_t uv_buf = uv_buf_init(const_cast<char*>(c_str), strlen(c_str));
+void NodeTraceWriter::WriteToFile(std::string&& str, int highest_request_id) {
   WriteRequest* write_req = new WriteRequest();
+  write_req->str = std::move(str);
   write_req->writer = this;
   write_req->highest_request_id = highest_request_id;
+  uv_buf_t uv_buf = uv_buf_init(const_cast<char*>(write_req->str.c_str()),
+                                write_req->str.length());
   request_mutex_.Lock();
   // Manage a queue of WriteRequest objects because the behavior of uv_write
   // is undefined if the same WriteRequest object is used more than once
@@ -149,16 +147,15 @@ void NodeTraceWriter::WriteToFile(std::string str, int highest_request_id) {
   // of the latest write request that has actually been completed.
   write_req_queue_.push(write_req);
   request_mutex_.Unlock();
-  // TODO: Is the return value of back() guaranteed to always have the
-  // same address?
-  int err = uv_write(reinterpret_cast<uv_write_t*>(write_req),
-                     reinterpret_cast<uv_stream_t*>(&trace_file_pipe_),
-                     &uv_buf, 1, WriteCb);
+  int err = uv_fs_write(tracing_loop_, reinterpret_cast<uv_fs_t*>(write_req),
+                        fd_, &uv_buf, 1, -1, WriteCb);
   CHECK_EQ(err, 0);
 }

-void NodeTraceWriter::WriteCb(uv_write_t* req, int status) {
+void NodeTraceWriter::WriteCb(uv_fs_t* req) {
   WriteRequest* write_req = reinterpret_cast<WriteRequest*>(req);
+  CHECK_GE(write_req->req.result, 0);
+
   NodeTraceWriter* writer = write_req->writer;
   int highest_request_id = write_req->highest_request_id;
   {
diff --git a/src/tracing/node_trace_writer.h b/src/tracing/node_trace_writer.h
index 0e1651ee5cbf0e..5813db0ab1fd33 100644
--- a/src/tracing/node_trace_writer.h
+++ b/src/tracing/node_trace_writer.h
@@ -28,14 +28,15 @@ class NodeTraceWriter : public TraceWriter {

  private:
   struct WriteRequest {
-    uv_write_t req;
+    uv_fs_t req;
     NodeTraceWriter* writer;
+    std::string str;
     int highest_request_id;
   };

-  static void WriteCb(uv_write_t* req, int status);
+  static void WriteCb(uv_fs_t* req);
   void OpenNewFileForStreaming();
-  void WriteToFile(std::string str, int highest_request_id);
+  void WriteToFile(std::string&& str, int highest_request_id);
   void WriteSuffix();
   static void FlushSignalCb(uv_async_t* signal);
   void FlushPrivate();
@@ -64,7 +65,6 @@ class NodeTraceWriter : public TraceWriter {
   int file_num_ = 0;
   std::ostringstream stream_;
   TraceWriter* json_trace_writer_ = nullptr;
-  uv_pipe_t trace_file_pipe_;
   bool exited_ = false;
 };

From 373215328cbd8530771a32e8e097b0b8a7de0050 Mon Sep 17 00:00:00 2001
From: Matt Loring
Date: Tue, 29 Nov 2016 22:32:54 -0800
Subject: [PATCH 17/22] Update flags and add unit test

---
 doc/api/cli.md                    |  6 +--
 doc/api/debugger.md               | 22 +------
 doc/api/tracing.md                | 19 +++++++
 src/node.cc                       | 90 +++++++++++++++----------------
 test/parallel/test-trace-event.js | 35 +++++++-----
 5 files changed, 89 insertions(+), 83 deletions(-)
 create mode 100644 doc/api/tracing.md

diff --git a/doc/api/cli.md b/doc/api/cli.md
index bea89a6ccd55ab..2000c2b3334b74 100644
--- a/doc/api/cli.md
+++ b/doc/api/cli.md
@@ -129,20 +129,20 @@ added: v2.1.0

 Prints a stack trace whenever synchronous I/O is detected after the first turn
 of the event loop.

-### `--enable-tracing`
+### `--trace-events-enabled`

 Enables the collection of trace event tracing information.

-### `--enabled-categories`
+### `--trace-event-categories`

 A comma separated list of categories that should be traced when trace event
-tracing is enabled using `--enable-tracing`.
+tracing is enabled using `--trace-events-enabled`.

 ### `--zero-fill-buffers`
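
Two things land in these last patches: writes go through uv_fs_write() instead of uv_write() on a pipe wrapped around the file descriptor (the pipe approach is what failed on Windows), and the WriteRequest now owns the payload string so the buffer is guaranteed to outlive the asynchronous write. Below is a minimal sketch of that ownership pattern against plain libuv; the file name and JSON payload are made up, and the loop and fd handling are simplified from the patch.

// Sketch only: request-owns-buffer pattern for uv_fs_write, as in PATCH 16.
#include <cassert>
#include <fcntl.h>
#include <string>
#include <uv.h>

struct WriteRequest {
  uv_fs_t req;      // must stay the first member so the cast in WriteCb is valid
  std::string str;  // keeps the buffer alive until the write completes
};

static void WriteCb(uv_fs_t* req) {
  WriteRequest* write_req = reinterpret_cast<WriteRequest*>(req);
  assert(write_req->req.result >= 0);
  uv_fs_req_cleanup(req);
  delete write_req;  // the buffer may be freed only now
}

int main() {
  uv_loop_t* loop = uv_default_loop();

  uv_fs_t open_req;
  int fd = uv_fs_open(loop, &open_req, "sketch_trace.log",
                      O_CREAT | O_WRONLY | O_TRUNC, 0644, nullptr);
  uv_fs_req_cleanup(&open_req);
  assert(fd >= 0);

  WriteRequest* write_req = new WriteRequest();
  write_req->str = "{\"traceEvents\":[]}";
  uv_buf_t buf = uv_buf_init(const_cast<char*>(write_req->str.c_str()),
                             write_req->str.length());
  // Offset -1 writes at the current file position, as in the patch.
  int err = uv_fs_write(loop, &write_req->req, fd, &buf, 1, -1, WriteCb);
  assert(err == 0);

  uv_run(loop, UV_RUN_DEFAULT);

  uv_fs_t close_req;
  uv_fs_close(loop, &close_req, fd, nullptr);
  uv_fs_req_cleanup(&close_req);
  return 0;
}

With PATCH 17 applied, the feature is driven as `node --trace-events-enabled --trace-event-categories v8,node app.js`, and the trace JSON accumulates in `node_trace.1.log`, which is what the updated test exercises.
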