[Commits] c0a4e59ba: Add Range Locking counters to PerfContext
revision-id: c0a4e59baa6e46f6e80d6ba049058b367c3a5c94 (v5.8-3394-gc0a4e59ba)
parent(s): 1ec5d0e1234f8deda4d135e2d48d0ed7406ea513
author: Sergei Petrunia
committer: Sergei Petrunia
timestamp: 2021-06-14 22:18:19 +0300
message:
Add Range Locking counters to PerfContext

Add range_lock_{locks,unlocks,mutex_locks} counters.

---
 include/rocksdb/perf_context.h                           |  5 +++++
 monitoring/perf_context.cc                               | 16 ++++++++++++++++
 .../lock/range/range_tree/lib/locktree/treenode.cc       | 12 +++++++++++-
 .../lock/range/range_tree/range_tree_lock_manager.cc     |  5 +++++
 .../lock/range/range_tree/range_tree_lock_tracker.cc     |  1 +
 5 files changed, 38 insertions(+), 1 deletion(-)

diff --git a/include/rocksdb/perf_context.h b/include/rocksdb/perf_context.h
index 3d61000cc..ddc551944 100644
--- a/include/rocksdb/perf_context.h
+++ b/include/rocksdb/perf_context.h
@@ -193,6 +193,11 @@ struct PerfContext {
   // number of times acquiring a lock was blocked by another transaction.
   uint64_t key_lock_wait_count;
 
+  uint64_t range_lock_locks;
+  uint64_t range_lock_unlocks;
+  uint64_t range_lock_mutex_locks;
+
+
   // Total time spent in Env filesystem operations. These are only populated
   // when TimedEnv is used.
   uint64_t env_new_sequential_file_nanos;
diff --git a/monitoring/perf_context.cc b/monitoring/perf_context.cc
index 53f502405..84ef0dd43 100644
--- a/monitoring/perf_context.cc
+++ b/monitoring/perf_context.cc
@@ -97,6 +97,10 @@ PerfContext::PerfContext(const PerfContext& other) {
   key_lock_wait_time = other.key_lock_wait_time;
   key_lock_wait_count = other.key_lock_wait_count;
 
+  range_lock_locks= other.range_lock_locks;
+  range_lock_unlocks= other.range_lock_unlocks;
+  range_lock_mutex_locks= other.range_lock_mutex_locks;
+
   env_new_sequential_file_nanos = other.env_new_sequential_file_nanos;
   env_new_random_access_file_nanos = other.env_new_random_access_file_nanos;
   env_new_writable_file_nanos = other.env_new_writable_file_nanos;
@@ -194,6 +198,10 @@ PerfContext::PerfContext(PerfContext&& other) noexcept {
   key_lock_wait_time = other.key_lock_wait_time;
   key_lock_wait_count = other.key_lock_wait_count;
 
+  range_lock_locks= other.range_lock_locks;
+  range_lock_unlocks= other.range_lock_unlocks;
+  range_lock_mutex_locks= other.range_lock_mutex_locks;
+
   env_new_sequential_file_nanos = other.env_new_sequential_file_nanos;
   env_new_random_access_file_nanos = other.env_new_random_access_file_nanos;
   env_new_writable_file_nanos = other.env_new_writable_file_nanos;
@@ -293,6 +301,10 @@ PerfContext& PerfContext::operator=(const PerfContext& other) {
   key_lock_wait_time = other.key_lock_wait_time;
   key_lock_wait_count = other.key_lock_wait_count;
 
+  range_lock_locks= other.range_lock_locks;
+  range_lock_unlocks= other.range_lock_unlocks;
+  range_lock_mutex_locks= other.range_lock_mutex_locks;
+
   env_new_sequential_file_nanos = other.env_new_sequential_file_nanos;
   env_new_random_access_file_nanos = other.env_new_random_access_file_nanos;
   env_new_writable_file_nanos = other.env_new_writable_file_nanos;
@@ -389,6 +401,10 @@ void PerfContext::Reset() {
   key_lock_wait_time = 0;
   key_lock_wait_count = 0;
 
+  range_lock_locks= 0;
+  range_lock_unlocks= 0;
+  range_lock_mutex_locks= 0;
+
   env_new_sequential_file_nanos = 0;
   env_new_random_access_file_nanos = 0;
   env_new_writable_file_nanos = 0;
diff --git a/utilities/transactions/lock/range/range_tree/lib/locktree/treenode.cc b/utilities/transactions/lock/range/range_tree/lib/locktree/treenode.cc
index 8997f634b..bcdaa672f 100644
--- a/utilities/transactions/lock/range/range_tree/lib/locktree/treenode.cc
+++ b/utilities/transactions/lock/range/range_tree/lib/locktree/treenode.cc
@@ -57,11 +57,21 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
 
 #include "../portability/toku_race_tools.h"
 
+namespace rocksdb {
+
+void increment_mutex_lock_counter();
+
+}
+
 namespace toku {
 
 // TODO: source location info might have to be pulled up one caller
 // to be useful
-void treenode::mutex_lock(void) { toku_mutex_lock(&m_mutex); }
+void treenode::mutex_lock(void) {
+
+  rocksdb::increment_mutex_lock_counter();
+  toku_mutex_lock(&m_mutex);
+}
 
 void treenode::mutex_unlock(void) { toku_mutex_unlock(&m_mutex); }
diff --git a/utilities/transactions/lock/range/range_tree/range_tree_lock_manager.cc b/utilities/transactions/lock/range/range_tree/range_tree_lock_manager.cc
index 55a66bc59..1c07f0992 100644
--- a/utilities/transactions/lock/range/range_tree/range_tree_lock_manager.cc
+++ b/utilities/transactions/lock/range/range_tree/range_tree_lock_manager.cc
@@ -47,6 +47,10 @@ void serialize_endpoint(const Endpoint& endp, std::string* buf) {
   buf->append(endp.slice.data(), endp.slice.size());
 }
 
+void increment_mutex_lock_counter() {
+  PERF_COUNTER_ADD(range_lock_mutex_locks, 1);
+}
+
 // Decode the endpoint from the format it is stored in the locktree (DBT) to
 // one used outside (EndpointWithString)
 void deserialize_endpoint(const DBT* dbt, EndpointWithString* endp) {
@@ -79,6 +83,7 @@ Status RangeTreeLockManager::TryLock(PessimisticTransaction* txn,
 
   auto lt = GetLockTreeForCF(column_family_id);
 
+  PERF_COUNTER_ADD(range_lock_locks, 1);
   // Put the key waited on into request's m_extra. See
   // wait_callback_for_locktree for details.
   std::string wait_key(start_endp.slice.data(), start_endp.slice.size());
diff --git a/utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc b/utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc
index be1e1478b..d138ed91f 100644
--- a/utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc
+++ b/utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc
@@ -112,6 +112,7 @@ void RangeLockList::ReleaseLocks(RangeTreeLockManager *mgr,
     if (it.second->get_num_ranges()) {
       auto lt_ptr = mgr->GetLockTreeForCF(it.first);
       toku::locktree *lt = lt_ptr.get();
+      PERF_COUNTER_ADD(range_lock_unlocks, it.second->get_num_ranges());
       lt->release_locks((TXNID)txn, it.second.get(), all_trx_locks);
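For illustration, here is a minimal sketch of how the new counters could be observed from application code. It assumes a TransactionDB opened with the range locking lock manager (NewRangeLockManager) and per-thread perf counting enabled; the database path and key below are made up for the example, and the exact counter values depend on the lock tree's internal behaviour.

#include <cassert>
#include <iostream>
#include <string>

#include "rocksdb/perf_context.h"
#include "rocksdb/perf_level.h"
#include "rocksdb/utilities/transaction.h"
#include "rocksdb/utilities/transaction_db.h"

int main() {
  using namespace ROCKSDB_NAMESPACE;

  Options options;
  options.create_if_missing = true;

  // Use the range locking lock manager instead of the default point lock
  // manager; this routes lock requests through the code paths instrumented
  // by this commit.
  TransactionDBOptions txn_db_options;
  txn_db_options.lock_mgr_handle = NewRangeLockManager(nullptr);

  TransactionDB* db = nullptr;
  // Hypothetical database path, for illustration only.
  Status s = TransactionDB::Open(options, txn_db_options,
                                 "/tmp/range_lock_demo", &db);
  assert(s.ok());

  // Count perf context events in this thread and start from zero.
  SetPerfLevel(PerfLevel::kEnableCount);
  get_perf_context()->Reset();

  Transaction* txn = db->BeginTransaction(WriteOptions());
  std::string value;
  // GetForUpdate acquires a lock through the range lock manager even if the
  // key does not exist, which should bump range_lock_locks (and
  // range_lock_mutex_locks on the way through the lock tree).
  s = txn->GetForUpdate(ReadOptions(), "some_key", &value);
  // Ending the transaction releases its locked ranges, which should bump
  // range_lock_unlocks.
  s = txn->Commit();
  delete txn;

  std::cout << "range_lock_locks:       "
            << get_perf_context()->range_lock_locks << "\n"
            << "range_lock_unlocks:     "
            << get_perf_context()->range_lock_unlocks << "\n"
            << "range_lock_mutex_locks: "
            << get_perf_context()->range_lock_mutex_locks << "\n";

  delete db;
  return 0;
}

Per the diff, range_lock_locks is incremented once per TryLock call, range_lock_unlocks by the number of ranges released when a transaction's locks are freed, and range_lock_mutex_locks once per treenode mutex acquisition inside the lock tree, so the last counter can be expected to grow fastest.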