diff --git a/src/hotspot/os/posix/perfMemory_posix.cpp b/src/hotspot/os/posix/perfMemory_posix.cpp index cbbecea3a6a0a..1a7319d08b7e3 100644 --- a/src/hotspot/os/posix/perfMemory_posix.cpp +++ b/src/hotspot/os/posix/perfMemory_posix.cpp @@ -1085,7 +1085,7 @@ static char* mmap_create_shared(size_t size) { static void unmap_shared(char* addr, size_t bytes) { int res; if (MemTracker::enabled()) { - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; res = ::munmap(addr, bytes); if (res == 0) { MemTracker::record_virtual_memory_release(addr, bytes); diff --git a/src/hotspot/os/windows/perfMemory_windows.cpp b/src/hotspot/os/windows/perfMemory_windows.cpp index 273814f6572f6..ea07ca21b9824 100644 --- a/src/hotspot/os/windows/perfMemory_windows.cpp +++ b/src/hotspot/os/windows/perfMemory_windows.cpp @@ -1800,7 +1800,7 @@ void PerfMemory::detach(char* addr, size_t bytes) { if (MemTracker::enabled()) { // it does not go through os api, the operation has to record from here - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; remove_file_mapping(addr); MemTracker::record_virtual_memory_release(addr, bytes); } else { diff --git a/src/hotspot/share/gc/z/zNMT.cpp b/src/hotspot/share/gc/z/zNMT.cpp index 1019bcfdd961b..6470ce548b2e0 100644 --- a/src/hotspot/share/gc/z/zNMT.cpp +++ b/src/hotspot/share/gc/z/zNMT.cpp @@ -47,7 +47,7 @@ void ZNMT::unreserve(zaddress_unsafe start, size_t size) { // We are the owner of the reserved memory, and any failure to unreserve // are fatal, so so we don't need to hold a lock while unreserving memory. - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; // The current NMT implementation does not support unreserving a memory // region that was built up from smaller memory reservations. 
Workaround diff --git a/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.cpp b/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.cpp index 875db3773005f..ee4d6eb24c425 100644 --- a/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.cpp +++ b/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.cpp @@ -78,7 +78,7 @@ void JfrNativeMemoryEvent::send_type_events(const Ticks& timestamp) { NMTUsage* usage = get_usage(timestamp); - for (int index = 0; index < mt_number_of_tags; index ++) { + for (int index = 0; index < MemTagFactory::number_of_tags(); index ++) { MemTag mem_tag = NMTUtil::index_to_tag(index); if (mem_tag == mtNone) { // Skip mtNone since it is not really used. diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp index 9179395a45199..2cec1775618f0 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp @@ -348,10 +348,14 @@ void CompilerTypeConstant::serialize(JfrCheckpointWriter& writer) { } void NMTTypeConstant::serialize(JfrCheckpointWriter& writer) { - writer.write_count(mt_number_of_tags); - for (int i = 0; i < mt_number_of_tags; ++i) { + writer.write_count(MemTagFactory::number_of_tags()); + for (int i = 0; i < MemTagFactory::number_of_tags(); ++i) { writer.write_key(i); MemTag mem_tag = NMTUtil::index_to_tag(i); - writer.write(NMTUtil::tag_to_name(mem_tag)); + const char* name = MemTagFactory::human_readable_name_of(mem_tag); + if (name == nullptr) { + name = MemTagFactory::name_of(mem_tag); + } + writer.write(name); } } diff --git a/src/hotspot/share/nmt/contiguousAllocator.cpp b/src/hotspot/share/nmt/contiguousAllocator.cpp new file mode 100644 index 0000000000000..62b83f2604e78 --- /dev/null +++ b/src/hotspot/share/nmt/contiguousAllocator.cpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "nmt/contiguousAllocator.hpp" +#include "nmt/memTracker.hpp" +#include "runtime/os.hpp" + +char* NMTContiguousAllocator::reserve_virtual_address_range() { + char* addr = os::pd_reserve_memory(_size, false); + assert(addr == nullptr || is_aligned(addr, _chunk_size), "must be"); + + return addr; +} + +char* NMTContiguousAllocator::allocate_chunk(size_t requested_size) { + char* next_offset = this->_offset + requested_size; + + if (next_offset > _start + this->_size) { + return nullptr; + } + + if (next_offset <= _committed_boundary) { + char* addr = _offset; + this->_offset = next_offset; + return addr; + } + // Commit the missing amount of memory in page-sized chunks + size_t bytes_available = _committed_boundary - _offset; + size_t chunk_size_missing = align_up(requested_size - bytes_available, _chunk_size); + + bool success = os::pd_commit_memory(this->_committed_boundary, chunk_size_missing, false); + if (!success) { + return nullptr; + } + + this->_committed_boundary += 
chunk_size_missing; + + char* addr = this->_offset; + this->_offset = next_offset; + return addr; +} + +NMTContiguousAllocator::~NMTContiguousAllocator() { + if (is_reserved()) { + unreserve(); + } +} diff --git a/src/hotspot/share/nmt/contiguousAllocator.hpp b/src/hotspot/share/nmt/contiguousAllocator.hpp new file mode 100644 index 0000000000000..5b30ff3c7814c --- /dev/null +++ b/src/hotspot/share/nmt/contiguousAllocator.hpp @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "nmt/memTag.hpp" +#include "utilities/align.hpp" +#include "runtime/os.hpp" + +#include +#include +#include + +#ifndef SHARE_NMT_CONTIGUOUSALLOCATOR_HPP +#define SHARE_NMT_CONTIGUOUSALLOCATOR_HPP + +class VirtualMemoryTracker; + +class NMTContiguousAllocator { + friend class ContiguousAllocatorTestFixture; + + char* reserve_virtual_address_range(); + char* allocate_chunk(size_t requested_size); + bool unreserve() { + return os::pd_release_memory(_start, _size); + } + +public: + MemTag _flag; + size_t _size; + size_t _chunk_size; + char* _start; // Start of memory + char* _offset; // Last returned point of allocation + char* _committed_boundary; // Anything below this is paged in, invariant: is_aligned with VM page size + NMTContiguousAllocator(size_t size, MemTag flag) + : _flag(flag), _size(align_up(size, os::vm_page_size())), + _chunk_size(os::vm_page_size()), + _start(reserve_virtual_address_range()), + _offset(_start), + _committed_boundary(_start) {} + + NMTContiguousAllocator(const NMTContiguousAllocator& other) + : _flag(other._flag), + _size(other._size), + _chunk_size(os::vm_page_size()), + _start(reserve_virtual_address_range()), + _offset(_start), + _committed_boundary(_start) { + char* alloc_addr = this->alloc(other._committed_boundary - other._start); + if (alloc_addr == nullptr) { + unreserve(); + _start = nullptr; + _size = 0; + return; + } + size_t bytes_allocated = other._offset - other._start; + memcpy(alloc_addr, other._start, bytes_allocated); + _offset = _start + bytes_allocated; + } + + ~NMTContiguousAllocator(); + + char* alloc(size_t size) { + assert(is_reserved(), "must be"); + return allocate_chunk(size); + } + + size_t size() const { assert(is_reserved(), "must be"); return _size; } + size_t amount_committed() const { assert(is_reserved(), "must be"); return _committed_boundary - _start;} + + char* at_offset(size_t offset) { + assert(is_reserved(), "must be"); + char* loc = _start + offset; + assert(loc < _offset, 
"must be"); + return loc; + } + + bool is_reserved() const { + return _start != nullptr; + } + + bool reserve_memory() { + if (!is_reserved()) { + char* addr = reserve_virtual_address_range(); + if (addr != nullptr) { + this->_start = addr; + assert(is_aligned(this->_start, this->_chunk_size), "must be"); + this->_offset = _start; + return true; + } + } + return false; + } + + void register_virtual_memory_usage(VirtualMemoryTracker& tracker); +}; + +// A static array which is backed by a NMTContiguousAllocator. +// The IndexType is used in order to minimize the size of index references to this array. +template +class NMTStaticArray { +protected: + using IndexType = IType; + using ThisArray = NMTStaticArray; + NMTContiguousAllocator _allocator; + IndexType _num_allocated; + const static size_t _max_reserved_size = + sizeof(T) * static_cast(std::numeric_limits::max()); + +public: + + NMTStaticArray(size_t size = 0) + : _allocator(size == 0 ? _max_reserved_size : size, mtNMT), + _num_allocated(0) {} + + // Snapshotting constructor + NMTStaticArray(const ThisArray& original) + : _allocator(original._allocator), + _num_allocated(original._num_allocated) {} + + T* adr_at(IndexType index) { + if (_num_allocated <= index) { + IndexType number_of_indices_to_allocate = index - _num_allocated + 1; + char* ret = _allocator.alloc(number_of_indices_to_allocate * sizeof(T)); + if (ret == nullptr) { + return nullptr; + } + _num_allocated += number_of_indices_to_allocate; + // Initialize the memory + T* base = reinterpret_cast(_allocator.at_offset(0)); + for (size_t mm = _num_allocated; mm <= index; mm++) { + new (&base[mm]) T(); + } + } + char* offset = _allocator.at_offset(sizeof(T) * index); + return (T*)offset; + } + + const T* adr_at(IndexType index) const { + return const_cast(this)->adr_at((IndexType)index); + } + + T& operator[](IndexType i) { + return *adr_at(i); + } + + const T& operator[](IndexType i) const { + return *const_cast(this)->adr_at(i); + } + + T& 
operator[](int i) { + assert(i <= std::numeric_limits::max(), "must be"); + return *adr_at((IndexType)i); + } + + const T& operator[](int i) const { + assert(i <= std::numeric_limits::max(), "must be"); + return *const_cast(this)->adr_at((IndexType)i); + } + + + IndexType number_of_tags_allocated() { + return _num_allocated; + } + + bool is_valid() { + return _allocator.is_reserved(); + } +}; + +#endif // SHARE_NMT_CONTIGUOUSALLOCATOR_HPP diff --git a/src/hotspot/share/nmt/mallocLimit.cpp b/src/hotspot/share/nmt/mallocLimit.cpp index ed479725cf9e7..546188b26dd2e 100644 --- a/src/hotspot/share/nmt/mallocLimit.cpp +++ b/src/hotspot/share/nmt/mallocLimit.cpp @@ -25,6 +25,7 @@ #include "nmt/mallocLimit.hpp" #include "nmt/memTag.hpp" +#include "nmt/memTagFactory.hpp" #include "nmt/nmtCommon.hpp" #include "runtime/java.hpp" #include "runtime/globals.hpp" @@ -32,7 +33,7 @@ #include "utilities/parseInteger.hpp" #include "utilities/ostream.hpp" -MallocLimitSet MallocLimitHandler::_limits; +Deferred MallocLimitHandler::_limits; bool MallocLimitHandler::_have_limit = false; static const char* const MODE_OOM = "oom"; @@ -89,7 +90,7 @@ class ParserHelper { } stringStream ss; ss.print("%.*s", (int)(end - _p), _p); - MemTag mem_tag = NMTUtil::string_to_mem_tag(ss.base()); + MemTag mem_tag = MemTagFactory::tag(ss.freeze()); if (mem_tag != mtNone) { *out = mem_tag; _p = end; @@ -132,28 +133,29 @@ void MallocLimitSet::set_global_limit(size_t s, MallocLimitMode flag) { void MallocLimitSet::set_category_limit(MemTag mem_tag, size_t s, MallocLimitMode flag) { const int i = NMTUtil::tag_to_index(mem_tag); - _cat[i].sz = s; _cat[i].mode = flag; + malloclimit& tag_limit = _cat.at_grow(i); + tag_limit.sz = s; tag_limit.mode = flag; } void MallocLimitSet::reset() { set_global_limit(0, MallocLimitMode::trigger_fatal); _glob.sz = 0; _glob.mode = MallocLimitMode::trigger_fatal; - for (int i = 0; i < mt_number_of_tags; i++) { + for (int i = 0; i < MemTagFactory::number_of_tags(); i++) { 
set_category_limit(NMTUtil::index_to_tag(i), 0, MallocLimitMode::trigger_fatal); } } -void MallocLimitSet::print_on(outputStream* st) const { +void MallocLimitSet::print_on(outputStream* st) { static const char* flagnames[] = { MODE_FATAL, MODE_OOM }; if (_glob.sz > 0) { st->print_cr("MallocLimit: total limit: " PROPERFMT " (%s)", PROPERFMTARGS(_glob.sz), mode_to_name(_glob.mode)); } else { - for (int i = 0; i < mt_number_of_tags; i++) { - if (_cat[i].sz > 0) { + for (int i = 0; i < MemTagFactory::number_of_tags(); i++) { + if (_cat.at_grow(i).sz > 0) { st->print_cr("MallocLimit: category \"%s\" limit: " PROPERFMT " (%s)", - NMTUtil::tag_to_enum_name(NMTUtil::index_to_tag(i)), - PROPERFMTARGS(_cat[i].sz), mode_to_name(_cat[i].mode)); + MemTagFactory::name_of(NMTUtil::index_to_tag(i)), + PROPERFMTARGS(_cat.at_grow(i).sz), mode_to_name(_cat.at_grow(i).mode)); } } } @@ -192,7 +194,7 @@ bool MallocLimitSet::parse_malloclimit_option(const char* v, const char** err) { BAIL_UNLESS(sst.match_category(&mem_tag), "Expected category name"); BAIL_UNLESS(sst.match_char(':'), "Expected colon following category"); - malloclimit* const modified_limit = &_cat[NMTUtil::tag_to_index(mem_tag)]; + malloclimit* const modified_limit = &_cat.at_grow(NMTUtil::tag_to_index(mem_tag)); // Match size BAIL_UNLESS(sst.match_size(&modified_limit->sz), "Expected size"); @@ -213,9 +215,10 @@ bool MallocLimitSet::parse_malloclimit_option(const char* v, const char** err) { void MallocLimitHandler::initialize(const char* options) { _have_limit = false; + _limits.initialize(); if (options != nullptr && options[0] != '\0') { const char* err = nullptr; - if (!_limits.parse_malloclimit_option(options, &err)) { + if (!_limits->parse_malloclimit_option(options, &err)) { vm_exit_during_initialization("Failed to parse MallocLimit", err); } _have_limit = true; @@ -224,7 +227,7 @@ void MallocLimitHandler::initialize(const char* options) { void MallocLimitHandler::print_on(outputStream* st) { if (have_limit()) { 
- _limits.print_on(st); + _limits->print_on(st); } else { st->print_cr("MallocLimit: unset"); } diff --git a/src/hotspot/share/nmt/mallocLimit.hpp b/src/hotspot/share/nmt/mallocLimit.hpp index ec6799b41a392..f3e443d616864 100644 --- a/src/hotspot/share/nmt/mallocLimit.hpp +++ b/src/hotspot/share/nmt/mallocLimit.hpp @@ -28,8 +28,11 @@ #include "memory/allStatic.hpp" #include "nmt/memTag.hpp" +#include "nmt/nmtCommon.hpp" #include "utilities/debug.hpp" +#include "utilities/deferred.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/growableArray.hpp" enum class MallocLimitMode { trigger_fatal = 0, @@ -39,14 +42,23 @@ enum class MallocLimitMode { struct malloclimit { size_t sz; // Limit size MallocLimitMode mode; // Behavior flags + + malloclimit() : sz(0), mode(MallocLimitMode::trigger_fatal) {} + malloclimit& operator=(const malloclimit& other) { + this->sz = other.sz; this->mode = other.mode; + return *this; + } + malloclimit(const malloclimit& other) { + *this = other; + } }; // forward declaration class outputStream; class MallocLimitSet { - malloclimit _glob; // global limit - malloclimit _cat[mt_number_of_tags]; // per-category limit + malloclimit _glob; // global limit + GrowableArrayCHeap _cat; // per-category limit public: MallocLimitSet(); @@ -57,19 +69,22 @@ class MallocLimitSet { void set_category_limit(MemTag mem_tag, size_t s, MallocLimitMode mode); const malloclimit* global_limit() const { return &_glob; } - const malloclimit* category_limit(MemTag mem_tag) const { return &_cat[(int)mem_tag]; } + const malloclimit* category_limit(MemTag mem_tag) { + _cat.at_grow(NMTUtil::tag_to_index(mem_tag)); + return &_cat.at(NMTUtil::tag_to_index(mem_tag)); + } - void print_on(outputStream* st) const; + void print_on(outputStream* st); }; class MallocLimitHandler : public AllStatic { - static MallocLimitSet _limits; + static Deferred _limits; static bool _have_limit; // shortcut public: - static const malloclimit* global_limit() { return 
_limits.global_limit(); } - static const malloclimit* category_limit(MemTag mem_tag) { return _limits.category_limit(mem_tag); } + static const malloclimit* global_limit() { return _limits->global_limit(); } + static const malloclimit* category_limit(MemTag mem_tag) { return _limits->category_limit(mem_tag); } static void initialize(const char* options); static void print_on(outputStream* st); diff --git a/src/hotspot/share/nmt/mallocTracker.cpp b/src/hotspot/share/nmt/mallocTracker.cpp index 6a2da5f79cd8e..e79d280da71cc 100644 --- a/src/hotspot/share/nmt/mallocTracker.cpp +++ b/src/hotspot/share/nmt/mallocTracker.cpp @@ -34,18 +34,20 @@ #include "nmt/mallocSiteTable.hpp" #include "nmt/mallocTracker.hpp" #include "nmt/memTracker.hpp" +#include "nmt/virtualMemoryTracker.hpp" #include "runtime/arguments.hpp" #include "runtime/atomic.hpp" #include "runtime/globals.hpp" #include "runtime/os.hpp" #include "runtime/safefetch.hpp" #include "utilities/debug.hpp" +#include "utilities/deferred.hpp" #include "utilities/macros.hpp" #include "utilities/ostream.hpp" #include "utilities/vmError.hpp" #include "utilities/globalDefinitions.hpp" -MallocMemorySnapshot MallocMemorySummary::_snapshot; +Deferred MallocMemorySummary::_snapshot; void MemoryCounter::update_peak(size_t size, size_t cnt) { size_t peak_sz = peak_size(); @@ -61,27 +63,29 @@ void MemoryCounter::update_peak(size_t size, size_t cnt) { } } -void MallocMemorySnapshot::copy_to(MallocMemorySnapshot* s) { +void MallocMemorySnapshot::copy_to(MallocMemorySnapshot** result) { // Use lock to make sure that mtChunks don't get deallocated while the // copy is going on, because their size is adjusted using this // buffer in make_adjustment(). 
+ MallocMemorySnapshot* s = new MallocMemorySnapshot(*this); ChunkPoolLocker lock; s->_all_mallocs = _all_mallocs; size_t total_size = 0; size_t total_count = 0; - for (int index = 0; index < mt_number_of_tags; index ++) { + for (int index = 0; index _malloc[index] = _malloc[index]; total_size += s->_malloc[index].malloc_size(); total_count += s->_malloc[index].malloc_count(); } // malloc counters may be updated concurrently s->_all_mallocs.set_size_and_count(total_size, total_count); + *result = s; } // Total malloc'd memory used by arenas size_t MallocMemorySnapshot::total_arena() const { size_t amount = 0; - for (int index = 0; index < mt_number_of_tags; index ++) { + for (int index = 0; index < MemTagFactory::number_of_tags(); index ++) { amount += _malloc[index].arena_size(); } return amount; @@ -91,14 +95,17 @@ size_t MallocMemorySnapshot::total_arena() const { // from total chunks to get total free chunk size void MallocMemorySnapshot::make_adjustment() { size_t arena_size = total_arena(); - int chunk_idx = NMTUtil::tag_to_index(mtChunk); - _malloc[chunk_idx].record_free(arena_size); + _malloc[mtChunk].record_free(arena_size); _all_mallocs.deallocate(arena_size); } -void MallocMemorySummary::initialize() { - // Uses placement new operator to initialize static area. 
+bool MallocMemorySummary::initialize() { + _snapshot.initialize(); + if (!_snapshot->is_valid()) { + return false; + } MallocLimitHandler::initialize(MallocLimit); + return true; } bool MallocMemorySummary::total_limit_reached(size_t s, size_t so_far, const malloclimit* limit) { @@ -132,7 +139,7 @@ bool MallocMemorySummary::category_limit_reached(MemTag mem_tag, size_t s, size_ #define FORMATTED \ "MallocLimit: reached category \"%s\" limit (triggering allocation size: " PROPERFMT ", allocated so far: " PROPERFMT ", limit: " PROPERFMT ") ", \ - NMTUtil::tag_to_enum_name(mem_tag), PROPERFMTARGS(s), PROPERFMTARGS(so_far), PROPERFMTARGS(limit->sz) + MemTagFactory::name_of(mem_tag), PROPERFMTARGS(s), PROPERFMTARGS(so_far), PROPERFMTARGS(limit->sz) // If we hit the limit during error reporting, we print a short warning but otherwise ignore it. // We don't want to risk recursive assertion or torn hs-err logs. @@ -157,9 +164,11 @@ bool MallocMemorySummary::category_limit_reached(MemTag mem_tag, size_t s, size_ bool MallocTracker::initialize(NMT_TrackingLevel level) { if (level >= NMT_summary) { - MallocMemorySummary::initialize(); + bool success = MallocMemorySummary::initialize(); + if (!success) { + return false; + } } - if (level == NMT_detail) { return MallocSiteTable::initialize(); } @@ -299,7 +308,7 @@ bool MallocTracker::print_pointer_information(const void* p, outputStream* st) { p2i(p), where, (block->is_dead() ? 
"dead" : "live"), p2i(block + 1), // lets print the payload start, not the header - block->size(), NMTUtil::tag_to_enum_name(block->mem_tag())); + block->size(), MemTagFactory::name_of(block->mem_tag())); if (MemTracker::tracking_level() == NMT_detail) { NativeCallStack ncs; if (MallocSiteTable::access_stack(ncs, *block)) { @@ -314,3 +323,13 @@ bool MallocTracker::print_pointer_information(const void* p, outputStream* st) { return false; } + +void MallocMemorySnapshot::MemTagArray::register_virtual_memory_usage(VirtualMemoryTracker& tracker) { + address base = (address)_allocator.at_offset(0); + tracker.add_reserved_region(base, _allocator.size(), CALLER_PC, mtNMT); + tracker.add_committed_region(base, _allocator.amount_committed(), CALLER_PC); +} + +void MallocMemorySnapshot::register_virtual_memory_usage(VirtualMemoryTracker& tracker) { + this->_malloc.register_virtual_memory_usage(tracker); +} diff --git a/src/hotspot/share/nmt/mallocTracker.hpp b/src/hotspot/share/nmt/mallocTracker.hpp index e71c9374d4b58..f2d7521e42ced 100644 --- a/src/hotspot/share/nmt/mallocTracker.hpp +++ b/src/hotspot/share/nmt/mallocTracker.hpp @@ -29,10 +29,16 @@ #include "nmt/mallocHeader.hpp" #include "nmt/memTag.hpp" #include "nmt/nmtCommon.hpp" +#include "nmt/contiguousAllocator.hpp" #include "runtime/atomic.hpp" +#include "utilities/deferred.hpp" #include "utilities/nativeCallStack.hpp" +#include +#include + class outputStream; +class VirtualMemoryTracker; struct malloclimit; /* @@ -144,24 +150,55 @@ class MallocMemory { class MallocMemorySummary; // A snapshot of malloc'd memory, includes malloc memory -// usage by tags and memory used by tracking itself. -class MallocMemorySnapshot { +// usage by types and memory used by tracking itself. 
+class MallocMemorySnapshot : public CHeapObj { friend class MallocMemorySummary; - private: - MallocMemory _malloc[mt_number_of_tags]; - MemoryCounter _all_mallocs; +public: + class MemTagArray : public NMTStaticArray> { + using ThisArray = NMTStaticArray>; + public: + void register_virtual_memory_usage(VirtualMemoryTracker& tracker); + + MallocMemory& at(MemTag mem_tag) { + return *this->adr_at((ThisArray::IndexType)mem_tag); + } + + const MallocMemory& at(MemTag mem_tag) const { + return *this->adr_at((ThisArray::IndexType)mem_tag); + } + + // Do not hide the operator[] overloads from the base class, extend them. + using ThisArray::operator[]; + + MallocMemory& operator[](MemTag mem_tag) { + return at(mem_tag); + } + const MallocMemory& operator[](MemTag mem_tag) const { + return at(mem_tag); + } + }; + +private: + MemTagArray _malloc; + MemoryCounter _all_mallocs; + +public: + MallocMemorySnapshot() + : _malloc(), _all_mallocs() {} + + MallocMemorySnapshot(MallocMemorySnapshot& snap) + : _malloc(snap._malloc), _all_mallocs() {} + + bool is_valid() { return _malloc.is_valid(); } - public: inline MallocMemory* by_tag(MemTag mem_tag) { - int index = NMTUtil::tag_to_index(mem_tag); - return &_malloc[index]; + return &_malloc[mem_tag]; } inline const MallocMemory* by_tag(MemTag mem_tag) const { - int index = NMTUtil::tag_to_index(mem_tag); - return &_malloc[index]; + return &_malloc[mem_tag]; } inline size_t malloc_overhead() const { @@ -191,11 +228,15 @@ class MallocMemorySnapshot { // Total malloc'd memory used by arenas size_t total_arena() const; - void copy_to(MallocMemorySnapshot* s); + void copy_to(MallocMemorySnapshot** s); // Make adjustment by subtracting chunks used by arenas // from total chunks to get total free chunk size void make_adjustment(); + + // The MallocTracker may allocate before the VirtualMemoryTracker has been instantiated. + // Because of this, the malloc tracker registers its virtual memory usage to a VMT when requested. 
+ void register_virtual_memory_usage(VirtualMemoryTracker& tracker); }; /* @@ -204,7 +245,7 @@ class MallocMemorySnapshot { class MallocMemorySummary : AllStatic { private: // Reserve memory for placement of MallocMemorySnapshot object - static MallocMemorySnapshot _snapshot; + static Deferred _snapshot; static bool _have_limits; // Called when a total limit break was detected. @@ -216,7 +257,7 @@ class MallocMemorySummary : AllStatic { static bool category_limit_reached(MemTag mem_tag, size_t s, size_t so_far, const malloclimit* limit); public: - static void initialize(); + static bool initialize(); static inline void record_malloc(size_t size, MemTag mem_tag) { as_snapshot()->by_tag(mem_tag)->record_malloc(size); @@ -240,9 +281,10 @@ class MallocMemorySummary : AllStatic { as_snapshot()->by_tag(mem_tag)->record_arena_size_change(size); } - static void snapshot(MallocMemorySnapshot* s) { + static void snapshot(MallocMemorySnapshot** s) { + *s = new MallocMemorySnapshot(*as_snapshot()); as_snapshot()->copy_to(s); - s->make_adjustment(); + (*s)->make_adjustment(); } // The memory used by malloc tracking headers @@ -251,7 +293,7 @@ class MallocMemorySummary : AllStatic { } static MallocMemorySnapshot* as_snapshot() { - return &_snapshot; + return _snapshot.get(); } // MallocLimit: returns true if allocating s bytes on f would trigger diff --git a/src/hotspot/share/nmt/memBaseline.cpp b/src/hotspot/share/nmt/memBaseline.cpp index 5f5431b765f64..a28cdeebf6251 100644 --- a/src/hotspot/share/nmt/memBaseline.cpp +++ b/src/hotspot/share/nmt/memBaseline.cpp @@ -140,7 +140,7 @@ void MemBaseline::baseline_summary() { MallocMemorySummary::snapshot(&_malloc_memory_snapshot); VirtualMemorySummary::snapshot(&_virtual_memory_snapshot); { - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; MemoryFileTracker::Instance::summary_snapshot(&_virtual_memory_snapshot); } diff --git a/src/hotspot/share/nmt/memBaseline.hpp b/src/hotspot/share/nmt/memBaseline.hpp index 
2fff4cc666c37..ecf900bb8f972 100644 --- a/src/hotspot/share/nmt/memBaseline.hpp +++ b/src/hotspot/share/nmt/memBaseline.hpp @@ -58,7 +58,7 @@ class MemBaseline { private: // Summary information - MallocMemorySnapshot _malloc_memory_snapshot; + MallocMemorySnapshot* _malloc_memory_snapshot; VirtualMemorySnapshot _virtual_memory_snapshot; MetaspaceCombinedStats _metaspace_stats; @@ -86,7 +86,10 @@ class MemBaseline { // create a memory baseline MemBaseline(): _instance_class_count(0), _array_class_count(0), _thread_count(0), - _baseline_type(Not_baselined) { + _baseline_type(Not_baselined) {} + + ~MemBaseline() { + delete _malloc_memory_snapshot; } void baseline(bool summaryOnly = true); @@ -94,7 +97,7 @@ class MemBaseline { BaselineType baseline_type() const { return _baseline_type; } MallocMemorySnapshot* malloc_memory_snapshot() { - return &_malloc_memory_snapshot; + return _malloc_memory_snapshot; } VirtualMemorySnapshot* virtual_memory_snapshot() { @@ -119,7 +122,7 @@ class MemBaseline { // memory size_t total_reserved_memory() const { assert(baseline_type() != Not_baselined, "Not yet baselined"); - size_t amount = _malloc_memory_snapshot.total() + + size_t amount = _malloc_memory_snapshot->total() + _virtual_memory_snapshot.total_reserved(); return amount; } @@ -128,25 +131,25 @@ class MemBaseline { // virtual memory size_t total_committed_memory() const { assert(baseline_type() != Not_baselined, "Not yet baselined"); - size_t amount = _malloc_memory_snapshot.total() + + size_t amount = _malloc_memory_snapshot->total() + _virtual_memory_snapshot.total_committed(); return amount; } size_t total_arena_memory() const { assert(baseline_type() != Not_baselined, "Not yet baselined"); - return _malloc_memory_snapshot.total_arena(); + return _malloc_memory_snapshot->total_arena(); } size_t malloc_tracking_overhead() const { assert(baseline_type() != Not_baselined, "Not yet baselined"); MemBaseline* bl = const_cast(this); - return 
bl->_malloc_memory_snapshot.malloc_overhead(); + return bl->_malloc_memory_snapshot->malloc_overhead(); } MallocMemory* malloc_memory(MemTag mem_tag) { assert(baseline_type() != Not_baselined, "Not yet baselined"); - return _malloc_memory_snapshot.by_tag(mem_tag); + return _malloc_memory_snapshot->by_tag(mem_tag); } VirtualMemory* virtual_memory(MemTag mem_tag) { diff --git a/src/hotspot/share/nmt/memMapPrinter.cpp b/src/hotspot/share/nmt/memMapPrinter.cpp index 515d3804638af..38a4d1ab84826 100644 --- a/src/hotspot/share/nmt/memMapPrinter.cpp +++ b/src/hotspot/share/nmt/memMapPrinter.cpp @@ -70,7 +70,7 @@ static const char* get_shortname_for_mem_tag(MemTag mem_tag) { #define DO(t, shortname, text) if (t == mem_tag) return shortname; NMT_FLAGS_DO(DO) #undef DO - return NMTUtil::tag_to_enum_name(mem_tag); + return MemTagFactory::name_of(mem_tag); } /// NMT virtual memory @@ -249,7 +249,7 @@ bool MappingPrintSession::print_nmt_info_for_region(const void* vma_from, const // Correlate vma region (from, to) with NMT region(s) we collected previously. 
const MemTagBitmap flags = _nmt_info.lookup(vma_from, vma_to); if (flags.has_any()) { - for (int i = 0; i < mt_number_of_tags; i++) { + for (int i = 0; i < MemTagFactory::number_of_tags(); i++) { const MemTag mem_tag = (MemTag)i; if (flags.has_tag(mem_tag)) { if (num_printed > 0) { diff --git a/src/hotspot/share/nmt/memReporter.cpp b/src/hotspot/share/nmt/memReporter.cpp index f7d7e121462f5..5d666724f5b29 100644 --- a/src/hotspot/share/nmt/memReporter.cpp +++ b/src/hotspot/share/nmt/memReporter.cpp @@ -80,7 +80,7 @@ void MemReporterBase::print_malloc(const MemoryCounter* c, MemTag mem_tag) const if (mem_tag != mtNone) { out->print("(%s%zu%s tag=%s", alloc_type, - amount_in_current_scale(amount), scale, NMTUtil::tag_to_name(mem_tag)); + amount_in_current_scale(amount), scale, MemTagFactory::human_readable_name_of(mem_tag)); } else { out->print("(%s%zu%s", alloc_type, amount_in_current_scale(amount), scale); @@ -177,7 +177,7 @@ void MemSummaryReporter::report() { out->cr(); // Summary by memory tag - for (int index = 0; index < mt_number_of_tags; index ++) { + for (int index = 0; index < MemTagFactory::number_of_tags(); index ++) { MemTag mem_tag = NMTUtil::index_to_tag(index); // thread stack is reported as part of thread category if (mem_tag == mtThreadStack) continue; @@ -219,7 +219,7 @@ void MemSummaryReporter::report_summary_of_tag(MemTag mem_tag, outputStream* out = output(); const char* scale = current_scale(); constexpr int indent = 28; - out->print("-%*s (", indent - 2, NMTUtil::tag_to_name(mem_tag)); + out->print("-%*s (", indent - 2, MemTagFactory::human_readable_name_of(mem_tag)); print_total(reserved_amount, committed_amount); #if INCLUDE_CDS if (mem_tag == mtClassShared) { @@ -380,7 +380,7 @@ int MemDetailReporter::report_virtual_memory_allocation_sites() { print_total(virtual_memory_site->reserved(), virtual_memory_site->committed()); const MemTag mem_tag = virtual_memory_site->mem_tag(); if (mem_tag != mtNone) { - out->print(" Tag=%s", 
NMTUtil::tag_to_name(mem_tag)); + out->print(" Tag=%s", MemTagFactory::human_readable_name_of(mem_tag)); } out->print_cr(")"); ) @@ -423,7 +423,7 @@ void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* const char* region_type = (all_committed ? "reserved and committed" : "reserved"); out->cr(); print_virtual_memory_region(region_type, reserved_rgn->base(), reserved_rgn->size()); - out->print(" for %s", NMTUtil::tag_to_name(reserved_rgn->mem_tag())); + out->print(" for %s", MemTagFactory::human_readable_name_of(reserved_rgn->mem_tag())); if (stack->is_empty()) { out->cr(); } else { @@ -465,7 +465,7 @@ void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* void MemDetailReporter::report_memory_file_allocations() { stringStream st; { - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; MemoryFileTracker::Instance::print_all_reports_on(&st, scale()); } output()->print_raw(st.freeze()); @@ -520,7 +520,7 @@ void MemSummaryDiffReporter::report_diff() { out->cr(); // Summary diff by memory tag - for (int index = 0; index < mt_number_of_tags; index ++) { + for (int index = 0; index < MemTagFactory::number_of_tags(); index ++) { MemTag mem_tag = NMTUtil::index_to_tag(index); // thread stack is reported as part of thread category if (mem_tag == mtThreadStack) continue; @@ -543,7 +543,7 @@ void MemSummaryDiffReporter::print_malloc_diff(size_t current_amount, size_t cur out->print("%s%zu%s", alloc_tag, amount_in_current_scale(current_amount), scale); // Report type only if it is valid and not under "thread" category if (mem_tag != mtNone && mem_tag != mtThread) { - out->print(" type=%s", NMTUtil::tag_to_name(mem_tag)); + out->print(" type=%s", MemTagFactory::human_readable_name_of(mem_tag)); } int64_t amount_diff = diff_in_current_scale(current_amount, early_amount); @@ -636,7 +636,7 @@ void MemSummaryDiffReporter::diff_summary_of_tag(MemTag mem_tag, diff_in_current_scale(current_reserved_amount, 
early_reserved_amount) != 0) { // print summary line - out->print("-%*s (", indent - 2, NMTUtil::tag_to_name(mem_tag)); + out->print("-%*s (", indent - 2, MemTagFactory::human_readable_name_of(mem_tag)); print_virtual_memory_diff(current_reserved_amount, current_committed_amount, early_reserved_amount, early_committed_amount); out->print_cr(")"); @@ -937,7 +937,7 @@ void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stac out->print("(mmap: "); print_virtual_memory_diff(current_reserved, current_committed, early_reserved, early_committed); if (mem_tag != mtNone) { - out->print(" Type=%s", NMTUtil::tag_to_name(mem_tag)); + out->print(" Type=%s", MemTagFactory::human_readable_name_of(mem_tag)); } out->print_cr(")"); ) diff --git a/src/hotspot/share/nmt/memTag.hpp b/src/hotspot/share/nmt/memTag.hpp index 9255645638d83..f1eb32bbcce68 100644 --- a/src/hotspot/share/nmt/memTag.hpp +++ b/src/hotspot/share/nmt/memTag.hpp @@ -66,8 +66,6 @@ mem_tag, enum class MemTag : uint8_t { MEMORY_TAG_DO(MEMORY_TAG_DECLARE_ENUM) - mt_number_of_tags // number of memory tags (mtDontTrack - // is not included as validate tag) }; #define MEMORY_TAG_SHORTNAME(mem_tag, human_readable) \ @@ -76,7 +74,4 @@ enum class MemTag : uint8_t { // Generate short aliases for the enum values. E.g. mtGC instead of MemTag::mtGC. MEMORY_TAG_DO(MEMORY_TAG_SHORTNAME) -// Make an int version of the sentinel end value. 
-constexpr int mt_number_of_tags = static_cast(MemTag::mt_number_of_tags); - #endif // SHARE_NMT_MEM_TAG_HPP diff --git a/src/hotspot/share/nmt/memTagBitmap.hpp b/src/hotspot/share/nmt/memTagBitmap.hpp index f65dce60fa62b..f58ae53da6c00 100644 --- a/src/hotspot/share/nmt/memTagBitmap.hpp +++ b/src/hotspot/share/nmt/memTagBitmap.hpp @@ -32,7 +32,8 @@ class MemTagBitmap { uint32_t _v; - STATIC_ASSERT(sizeof(_v) * BitsPerByte >= mt_number_of_tags); + // TODO: + // STATIC_ASSERT(sizeof(_v) * BitsPerByte >= MemTagFactory::number_of_tags()); public: MemTagBitmap(uint32_t v = 0) : _v(v) {} diff --git a/src/hotspot/share/nmt/memTagFactory.cpp b/src/hotspot/share/nmt/memTagFactory.cpp new file mode 100644 index 0000000000000..331812d83909f --- /dev/null +++ b/src/hotspot/share/nmt/memTagFactory.cpp @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include + +Deferred MemTagFactory::_instance; diff --git a/src/hotspot/share/nmt/memTagFactory.hpp b/src/hotspot/share/nmt/memTagFactory.hpp new file mode 100644 index 0000000000000..4f0f111ac5c93 --- /dev/null +++ b/src/hotspot/share/nmt/memTagFactory.hpp @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +// Dual-mapping tag to name and name to tag +// where strings are malloc-allocated +#include "memory/allocation.hpp" +#include "memory/allocation.inline.hpp" +#include "nmt/memTag.hpp" +#include "nmt/nmtCommon.hpp" +#include "nmt/nmtLocker.hpp" +#include "utilities/debug.hpp" +#include "utilities/deferred.hpp" +#include "utilities/growableArray.hpp" +#include "utilities/resourceHash.hpp" + +#include +#include + +#ifndef SHARE_NMT_MEMTAGFACTORY_HPP +#define SHARE_NMT_MEMTAGFACTORY_HPP + +struct NameToTagTable { + using EntryRef = std::underlying_type_t; + constexpr static const EntryRef Nil = std::numeric_limits::max() - 1; + + static constexpr const auto nr_of_buckets = 128; + + using MemTagI = std::underlying_type_t; + MemTagI index(MemTag tag) { + return static_cast(tag); + } + + struct Entry { + MemTag tag; + EntryRef next; + + Entry(MemTag tag, EntryRef next) + : tag(tag), + next(next) { + } + Entry() + : next(Nil) { + } + }; + + GrowableArrayCHeap entries; + const int table_size; + EntryRef* table; + GrowableArrayCHeap names; + GrowableArrayCHeap human_readable_names; + + NameToTagTable() + : entries(), + table_size(nr_of_buckets), + table(nullptr), + names(), human_readable_names() { + table = NEW_C_HEAP_ARRAY(EntryRef, table_size, mtNMT); + for (int i = 0; i < table_size; i++) { + table[i] = Nil; + } + } + + // string hash taken from libadt and made worse! 
+ int string_hash(const char* t) { + char c; + int k = 0; + int32_t sum = 0; + const char* s = (const char*)t; + + while (((c = *s++) != '\0')) { + c = (c << 1) + 1; + sum += c + (c << (k++ % 6)); + } + return abs((int)((sum + 261) >> 1)); + } + + void put(MemTag tag, const char* name) { + int bucket = string_hash(name) % table_size; + EntryRef link = table[bucket]; + while (link != Nil) { + Entry e = entries.at(link); + MemTagI ei = index(e.tag); + if (strcmp(names.at(ei), name) == 0) { + return; + } + link = e.next; + } + const char* name_copy = os::strdup(name, mtNMT); + MemTagI idx = index(tag); + names.at_grow(idx, name_copy); + Entry nentry(tag, table[bucket]); + entries.push(nentry); + table[bucket] = entries.length() - 1; + } + + MemTag tag_of(const char* name) { + int bucket = string_hash(name) % table_size; + EntryRef link = table[bucket]; + while (link != Nil) { + Entry e = entries.at(link); + if (strcmp(names.at(index(e.tag)), name) == 0) { + return e.tag; + } + link = e.next; + } + return mtNone; + } + + const char* name_of(MemTag tag) { + return names.at(index(tag)); + } + + const char* human_readable_name_of(MemTag tag) { + MemTagI i = index(tag); + if (i < human_readable_names.length()) { + return human_readable_names.at(index(tag)); + } + return nullptr; + } + void set_human_readable_name_of(MemTag tag, const char* hrn) { + MemTagI i = index(tag); + const char* copy = os::strdup(hrn); + const char*& ref = human_readable_names.at_grow(i, nullptr); + ref = copy; + } + + int number_of_tags() { + return entries.length(); + } +}; + +struct MemTagFactory { + using MemTagI = std::underlying_type_t; + + struct Instance { + NameToTagTable table; + MemTagI current_index; + + Instance() : table(), current_index(0) { +#define MEMORY_TAG_ADD_TO_TABLE(mem_tag, human_readable) \ + MemTag mem_tag = tag(#mem_tag); \ + set_human_readable_name_of(mem_tag, human_readable); + +MEMORY_TAG_DO(MEMORY_TAG_ADD_TO_TABLE) +#undef MEMORY_TAG_ADD_TO_TABLE + } + MemTag tag(const 
char* name) { + MemTag found = table.tag_of(name); + if (found != mtNone) { + return found; + } + MemTag i = static_cast(current_index); + table.put(i, name); + current_index++; + return i; + } + + const char* name_of(MemTag tag) { + return table.name_of(tag); + } + + const char* human_readable_name_of(MemTag tag) { + return table.human_readable_name_of(tag); + } + + void set_human_readable_name_of(MemTag tag, const char* hrn) { + return table.set_human_readable_name_of(tag, hrn); + } + + int number_of_tags() { + return table.number_of_tags(); + } + }; + + static Deferred _instance; + + static void initialize() { + NmtMemTagLocker nvml; + _instance.initialize(); + } + static MemTag tag(const char* name) { + NmtMemTagLocker nvml; + return _instance->tag(name); + } + + static const char* name_of(MemTag tag) { + NmtMemTagLocker nvml; + return _instance->name_of(tag); + } + + static const char* human_readable_name_of(MemTag tag) { + NmtMemTagLocker nvml; + return _instance->human_readable_name_of(tag); + } + + static void set_human_readable_name_of(MemTag tag, const char* hrn) { + NmtMemTagLocker nvml; + return _instance->set_human_readable_name_of(tag, hrn); + } + + static int number_of_tags() { + NmtMemTagLocker nvml; + return _instance->number_of_tags(); + } +}; + +#endif // SHARE_NMT_MEMTAGFACTORY_HPP diff --git a/src/hotspot/share/nmt/memTracker.cpp b/src/hotspot/share/nmt/memTracker.cpp index b3d94ceabec3c..da6942ee50cd1 100644 --- a/src/hotspot/share/nmt/memTracker.cpp +++ b/src/hotspot/share/nmt/memTracker.cpp @@ -52,8 +52,6 @@ NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown; Deferred MemTracker::_baseline; -bool MemTracker::NmtVirtualMemoryLocker::_safe_to_use; - void MemTracker::initialize() { bool rc = true; assert(_tracking_level == NMT_unknown, "only call once"); @@ -63,14 +61,12 @@ void MemTracker::initialize() { assert(level == NMT_off || level == NMT_summary || level == NMT_detail, "Invalid setting for NativeMemoryTracking (%s)", 
NativeMemoryTracking); - // Memory tag is encoded into tracking header as a byte field, - // make sure that we don't overflow it. - STATIC_ASSERT(mt_number_of_tags <= max_jubyte); - if (level > NMT_off) { + MemTagFactory::initialize(); _baseline.initialize(); if (!MallocTracker::initialize(level) || !MemoryFileTracker::Instance::initialize(level) || + !VirtualMemorySummary::initialize() || !VirtualMemoryTracker::initialize(level)) { assert(false, "NMT initialization failed"); level = NMT_off; diff --git a/src/hotspot/share/nmt/memTracker.hpp b/src/hotspot/share/nmt/memTracker.hpp index c208a02054fec..79266e9605aa7 100644 --- a/src/hotspot/share/nmt/memTracker.hpp +++ b/src/hotspot/share/nmt/memTracker.hpp @@ -30,9 +30,10 @@ #include "nmt/memBaseline.hpp" #include "nmt/nmtCommon.hpp" #include "nmt/memoryFileTracker.hpp" +#include "nmt/nmtCommon.hpp" +#include "nmt/nmtLocker.hpp" #include "nmt/threadStackTracker.hpp" #include "nmt/virtualMemoryTracker.hpp" -#include "runtime/mutexLocker.hpp" #include "utilities/debug.hpp" #include "utilities/nativeCallStack.hpp" #include "utilities/deferred.hpp" @@ -279,39 +280,6 @@ class MemTracker : AllStatic { // and return true; false if not found. static bool print_containing_region(const void* p, outputStream* out); - /* - * NmtVirtualMemoryLocker is similar to MutexLocker but can be used during VM init before mutexes are ready or - * current thread has been assigned. Performs no action during VM init. - * - * Unlike malloc, NMT requires locking for virtual memory operations. This is because it must synchronize the usage - * of global data structures used for modelling the effect of virtual memory operations. - * It is important that locking is used such that the actual OS memory operations (mmap) are done atomically with the - * corresponding NMT accounting (updating the internal model). Currently, this is not the case in all situations - * (see JDK-8341491), but this should be changed in the future. 
- * - * An issue with using Mutex is that NMT is used early during VM initialization before mutexes are initialized - * and current thread is attached. Mutexes do not work under those conditions, so we must use a flag to avoid - * attempting to lock until initialization is finished. Lack of synchronization here should not be a problem since it - * is single threaded at that point in time anyway. - */ - class NmtVirtualMemoryLocker: StackObj { - // Returns true if it is safe to start using this locker. - static bool _safe_to_use; - ConditionalMutexLocker _cml; - - public: - NmtVirtualMemoryLocker(): _cml(NmtVirtualMemory_lock, _safe_to_use, Mutex::_no_safepoint_check_flag){} - - static inline bool is_safe_to_use() { - return _safe_to_use; - } - - // Set in Threads::create_vm once threads and mutexes have been initialized. - static inline void set_safe_to_use() { - _safe_to_use = true; - } - }; - private: static void report(bool summary_only, outputStream* output, size_t scale); diff --git a/src/hotspot/share/nmt/memoryFileTracker.cpp b/src/hotspot/share/nmt/memoryFileTracker.cpp index d753a57ede7f1..b9ca948c329ce 100644 --- a/src/hotspot/share/nmt/memoryFileTracker.cpp +++ b/src/hotspot/share/nmt/memoryFileTracker.cpp @@ -43,7 +43,7 @@ void MemoryFileTracker::allocate_memory(MemoryFile* file, size_t offset, NativeCallStackStorage::StackIndex sidx = _stack_storage.push(stack); VMATree::RegionData regiondata(sidx, mem_tag); VMATree::SummaryDiff diff = file->_tree.commit_mapping(offset, size, regiondata); - for (int i = 0; i < mt_number_of_tags; i++) { + for (int i = 0; i < MemTagFactory::number_of_tags(); i++) { VirtualMemory* summary = file->_summary.by_tag(NMTUtil::index_to_tag(i)); summary->reserve_memory(diff.tag[i].commit); summary->commit_memory(diff.tag[i].commit); @@ -52,7 +52,7 @@ void MemoryFileTracker::allocate_memory(MemoryFile* file, size_t offset, void MemoryFileTracker::free_memory(MemoryFile* file, size_t offset, size_t size) { VMATree::SummaryDiff 
diff = file->_tree.release_mapping(offset, size); - for (int i = 0; i < mt_number_of_tags; i++) { + for (int i = 0; i < MemTagFactory::number_of_tags(); i++) { VirtualMemory* summary = file->_summary.by_tag(NMTUtil::index_to_tag(i)); summary->reserve_memory(diff.tag[i].commit); summary->commit_memory(diff.tag[i].commit); @@ -88,7 +88,7 @@ void MemoryFileTracker::print_report_on(const MemoryFile* file, outputStream* st start_addr, end_addr, NMTUtil::amount_in_scale(end_addr - start_addr, scale), NMTUtil::scale_name(scale), - NMTUtil::tag_to_name(prev->val().out.mem_tag())); + MemTagFactory::human_readable_name_of(prev->val().out.mem_tag())); { StreamIndentor si(stream, 4); _stack_storage.get(prev->val().out.stack()).print_on(stream); diff --git a/src/hotspot/share/nmt/memoryFileTracker.hpp b/src/hotspot/share/nmt/memoryFileTracker.hpp index cd7a3fb8593f7..de07326ce2e6b 100644 --- a/src/hotspot/share/nmt/memoryFileTracker.hpp +++ b/src/hotspot/share/nmt/memoryFileTracker.hpp @@ -78,7 +78,7 @@ class MemoryFileTracker { void iterate_summary(F f) const { for (int d = 0; d < _files.length(); d++) { const MemoryFile* file = _files.at(d); - for (int i = 0; i < mt_number_of_tags; i++) { + for (int i = 0; i < MemTagFactory::number_of_tags(); i++) { f(NMTUtil::index_to_tag(i), file->_summary.by_tag(NMTUtil::index_to_tag(i))); } } diff --git a/src/hotspot/share/nmt/nmtCommon.cpp b/src/hotspot/share/nmt/nmtCommon.cpp index 834f27152ee36..bc1deb448d476 100644 --- a/src/hotspot/share/nmt/nmtCommon.cpp +++ b/src/hotspot/share/nmt/nmtCommon.cpp @@ -25,17 +25,11 @@ #include "nmt/nmtCommon.hpp" #include "utilities/globalDefinitions.hpp" + STATIC_ASSERT(NMT_off > NMT_unknown); STATIC_ASSERT(NMT_summary > NMT_off); STATIC_ASSERT(NMT_detail > NMT_summary); -#define MEMORY_TAG_DECLARE_NAME(tag, human_readable) \ - { #tag, human_readable }, - -NMTUtil::S NMTUtil::_strings[] = { - MEMORY_TAG_DO(MEMORY_TAG_DECLARE_NAME) -}; - const char* NMTUtil::scale_name(size_t scale) { switch(scale) { 
case 1: return ""; @@ -86,16 +80,3 @@ NMT_TrackingLevel NMTUtil::parse_tracking_level(const char* s) { } return NMT_unknown; } - -MemTag NMTUtil::string_to_mem_tag(const char* s) { - for (int i = 0; i < mt_number_of_tags; i ++) { - assert(::strlen(_strings[i].enum_s) > 2, "Sanity"); // should always start with "mt" - if (::strcasecmp(_strings[i].human_readable, s) == 0 || - ::strcasecmp(_strings[i].enum_s, s) == 0 || - ::strcasecmp(_strings[i].enum_s + 2, s) == 0) // "mtXXX" -> match also "XXX" or "xxx" - { - return (MemTag)i; - } - } - return mtNone; -} diff --git a/src/hotspot/share/nmt/nmtCommon.hpp b/src/hotspot/share/nmt/nmtCommon.hpp index 5ba066a29d146..e8c8eb59895bd 100644 --- a/src/hotspot/share/nmt/nmtCommon.hpp +++ b/src/hotspot/share/nmt/nmtCommon.hpp @@ -29,7 +29,9 @@ #include "memory/allStatic.hpp" #include "nmt/memTag.hpp" +#include "nmt/memTagFactory.hpp" #include "utilities/align.hpp" +#include "utilities/deferred.hpp" #include "utilities/globalDefinitions.hpp" // Native memory tracking level @@ -74,10 +76,10 @@ const int NMT_TrackingStackDepth = 4; // A few common utilities for native memory tracking class NMTUtil : AllStatic { - public: +public: // Check if index is a valid MemTag enum value (including mtNone) static inline bool tag_index_is_valid(int index) { - return index >= 0 && index < mt_number_of_tags; + return index >= 0 && index < MemTagFactory::number_of_tags(); } // Check if tag value is a valid MemTag enum value (including mtNone) @@ -92,16 +94,6 @@ class NMTUtil : AllStatic { return static_cast(mem_tag); } - // Map memory tag to human readable name - static const char* tag_to_name(MemTag mem_tag) { - return _strings[tag_to_index(mem_tag)].human_readable; - } - - // Map memory tag to literalized enum name (e.g. 
"mtTest") - static const char* tag_to_enum_name(MemTag mem_tag) { - return _strings[tag_to_index(mem_tag)].enum_s; - } - // Map an index to memory tag static MemTag index_to_tag(int index) { assert(tag_index_is_valid(index), "Invalid tag index (%d)", index); @@ -121,20 +113,8 @@ class NMTUtil : AllStatic { // string is not a valid level. static NMT_TrackingLevel parse_tracking_level(const char* s); - // Given a string, return associated mem_tag. mtNone if name is invalid. - // String can be either the human readable name or the - // stringified enum (with or without leading "mt". In all cases, case is ignored. - static MemTag string_to_mem_tag(const char* name); - // Returns textual representation of a tracking level. static const char* tracking_level_to_string(NMT_TrackingLevel level); - - private: - struct S { - const char* enum_s; // e.g. "mtNMT" - const char* human_readable; // e.g. "Native Memory Tracking" - }; - static S _strings[mt_number_of_tags]; }; diff --git a/src/hotspot/share/nmt/nmtLocker.cpp b/src/hotspot/share/nmt/nmtLocker.cpp new file mode 100644 index 0000000000000..8475a4fb139a9 --- /dev/null +++ b/src/hotspot/share/nmt/nmtLocker.cpp @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "nmt/nmtLocker.hpp" + +bool NmtVirtualMemoryLocker::_safe_to_use; +bool NmtMemTagLocker::_safe_to_use; diff --git a/src/hotspot/share/nmt/nmtLocker.hpp b/src/hotspot/share/nmt/nmtLocker.hpp new file mode 100644 index 0000000000000..fac7ea025edbe --- /dev/null +++ b/src/hotspot/share/nmt/nmtLocker.hpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_NMT_NMTLOCKER_HPP +#define SHARE_NMT_NMTLOCKER_HPP + +#include "runtime/mutexLocker.hpp" + +/* + * NmtVirtualMemoryLocker is similar to MutexLocker but can be used during VM init before mutexes are ready or + * current thread has been assigned. Performs no action during VM init. + * + * Unlike malloc, NMT requires locking for virtual memory operations. This is because it must synchronize the usage + * of global data structures used for modelling the effect of virtual memory operations. + * It is important that locking is used such that the actual OS memory operations (mmap) are done atomically with the + * corresponding NMT accounting (updating the internal model). Currently, this is not the case in all situations + * (see JDK-8341491), but this should be changed in the future. + * + * An issue with using Mutex is that NMT is used early during VM initialization before mutexes are initialized + * and current thread is attached. Mutexes do not work under those conditions, so we must use a flag to avoid + * attempting to lock until initialization is finished. Lack of synchronization here should not be a problem since it + * is single threaded at that point in time anyway. + */ +class NmtVirtualMemoryLocker : StackObj { + // Returns true if it is safe to start using this locker. + static bool _safe_to_use; + ConditionalMutexLocker _cml; + +public: + NmtVirtualMemoryLocker() + : _cml(NmtVirtualMemory_lock, _safe_to_use, Mutex::_no_safepoint_check_flag) { + } + + static inline bool is_safe_to_use() { + return _safe_to_use; + } + + // Set in Threads::create_vm once threads and mutexes have been initialized. + static inline void set_safe_to_use() { + _safe_to_use = true; + } +}; + +class NmtMemTagLocker : StackObj { + // Returns true if it is safe to start using this locker. 
+ static bool _safe_to_use; + ConditionalMutexLocker _cml; + +public: + NmtMemTagLocker() + : _cml(NmtMemTag_lock, _safe_to_use, Mutex::_no_safepoint_check_flag) { + } + + static inline bool is_safe_to_use() { + return _safe_to_use; + } + + // Set in Threads::create_vm once threads and mutexes have been initialized. + static inline void set_safe_to_use() { + _safe_to_use = true; + } +}; + + +#endif // SHARE_NMT_NMTLOCKER_HPP diff --git a/src/hotspot/share/nmt/nmtUsage.cpp b/src/hotspot/share/nmt/nmtUsage.cpp index f8ca1b0e1fa41..c2f8286852ca1 100644 --- a/src/hotspot/share/nmt/nmtUsage.cpp +++ b/src/hotspot/share/nmt/nmtUsage.cpp @@ -61,10 +61,10 @@ void NMTUsage::update_malloc_usage() { } size_t total_arena_size = 0; - for (int i = 0; i < mt_number_of_tags; i++) { + for (int i = 0; i < MemTagFactory::number_of_tags(); i++) { MemTag mem_tag = NMTUtil::index_to_tag(i); const MallocMemory* mm = ms->by_tag(mem_tag); - _malloc_by_type[i] = mm->malloc_size() + mm->arena_size(); + _malloc_by_type.at_grow(i) = mm->malloc_size() + mm->arena_size(); total_arena_size += mm->arena_size(); } @@ -72,11 +72,11 @@ void NMTUsage::update_malloc_usage() { _malloc_total = ms->total(); // Adjustment due to mtChunk double counting. - _malloc_by_type[NMTUtil::tag_to_index(mtChunk)] -= total_arena_size; + _malloc_by_type.at_grow(NMTUtil::tag_to_index(mtChunk)) -= total_arena_size; _malloc_total -= total_arena_size; // Adjust mtNMT to include malloc overhead. - _malloc_by_type[NMTUtil::tag_to_index(mtNMT)] += ms->malloc_overhead(); + _malloc_by_type.at_grow(NMTUtil::tag_to_index(mtNMT)) += ms->malloc_overhead(); } void NMTUsage::update_vm_usage() { @@ -85,22 +85,22 @@ void NMTUsage::update_vm_usage() { // Reset total to allow recalculation. 
_vm_total.committed = 0; _vm_total.reserved = 0; - for (int i = 0; i < mt_number_of_tags; i++) { + for (int i = 0; i < MemTagFactory::number_of_tags(); i++) { MemTag mem_tag = NMTUtil::index_to_tag(i); const VirtualMemory* vm = vms->by_tag(mem_tag); - _vm_by_type[i].reserved = vm->reserved(); - _vm_by_type[i].committed = vm->committed(); + _vm_by_type.at_grow(i).reserved = vm->reserved(); + _vm_by_type.at_grow(i).committed = vm->committed(); _vm_total.reserved += vm->reserved(); _vm_total.committed += vm->committed(); } { // MemoryFileTracker addition using MFT = MemoryFileTracker::Instance; - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; MFT::iterate_summary([&](MemTag tag, const VirtualMemory* vm) { int i = NMTUtil::tag_to_index(tag); - _vm_by_type[i].committed += vm->committed(); + _vm_by_type.at_grow(i).committed += vm->committed(); _vm_total.committed += vm->committed(); }); } @@ -132,10 +132,12 @@ size_t NMTUsage::total_committed() const { size_t NMTUsage::reserved(MemTag mem_tag) const { int index = NMTUtil::tag_to_index(mem_tag); - return _malloc_by_type[index] + _vm_by_type[index].reserved; + return const_cast&>(_malloc_by_type).at_grow(index) + + const_cast&>(_vm_by_type).at_grow(index).reserved; } size_t NMTUsage::committed(MemTag mem_tag) const { int index = NMTUtil::tag_to_index(mem_tag); - return _malloc_by_type[index] + _vm_by_type[index].committed; + return const_cast&>(_malloc_by_type).at_grow(index) + + const_cast&>(_vm_by_type).at_grow(index).committed; } diff --git a/src/hotspot/share/nmt/nmtUsage.hpp b/src/hotspot/share/nmt/nmtUsage.hpp index 2011e7ed240f5..8f1f193439527 100644 --- a/src/hotspot/share/nmt/nmtUsage.hpp +++ b/src/hotspot/share/nmt/nmtUsage.hpp @@ -28,6 +28,7 @@ #include "memory/allocation.hpp" #include "nmt/memTag.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/growableArray.hpp" struct NMTUsagePair { size_t reserved; @@ -35,16 +36,16 @@ struct NMTUsagePair { }; struct 
NMTUsageOptions { - bool update_thread_stacks; - bool include_malloc; - bool include_vm; + bool update_thread_stacks : 1; + bool include_malloc : 1; + bool include_vm : 1; }; class NMTUsage : public CHeapObj { private: - size_t _malloc_by_type[mt_number_of_tags]; + GrowableArrayCHeap _malloc_by_type; size_t _malloc_total; - NMTUsagePair _vm_by_type[mt_number_of_tags]; + GrowableArrayCHeap _vm_by_type; NMTUsagePair _vm_total; NMTUsageOptions _usage_options; diff --git a/src/hotspot/share/nmt/threadStackTracker.cpp b/src/hotspot/share/nmt/threadStackTracker.cpp index dabb23f0801e3..92d0fc6ef32de 100644 --- a/src/hotspot/share/nmt/threadStackTracker.cpp +++ b/src/hotspot/share/nmt/threadStackTracker.cpp @@ -51,7 +51,7 @@ void ThreadStackTracker::new_thread_stack(void* base, size_t size, const NativeC assert(base != nullptr, "Should have been filtered"); align_thread_stack_boundaries_inward(base, size); - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; VirtualMemoryTracker::add_reserved_region((address)base, size, stack, mtThreadStack); _thread_count++; } @@ -61,7 +61,7 @@ void ThreadStackTracker::delete_thread_stack(void* base, size_t size) { assert(base != nullptr, "Should have been filtered"); align_thread_stack_boundaries_inward(base, size); - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; VirtualMemoryTracker::remove_released_region((address)base, size); _thread_count--; } diff --git a/src/hotspot/share/nmt/virtualMemoryTracker.cpp b/src/hotspot/share/nmt/virtualMemoryTracker.cpp index 04a720794607b..c4c232547efa2 100644 --- a/src/hotspot/share/nmt/virtualMemoryTracker.cpp +++ b/src/hotspot/share/nmt/virtualMemoryTracker.cpp @@ -22,16 +22,14 @@ * */ #include "logging/log.hpp" -#include "memory/metaspaceStats.hpp" #include "memory/metaspaceUtils.hpp" #include "nmt/memTracker.hpp" #include "nmt/nativeCallStackPrinter.hpp" -#include "nmt/threadStackTracker.hpp" #include "nmt/virtualMemoryTracker.hpp" #include 
"runtime/os.hpp" #include "utilities/ostream.hpp" -VirtualMemorySnapshot VirtualMemorySummary::_snapshot; +Deferred VirtualMemorySummary::_snapshot; void VirtualMemory::update_peak(size_t size) { size_t peak_sz = peak_size(); @@ -242,7 +240,7 @@ bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) { size_t size = del_rgn.end() - crgn->base(); crgn->exclude_region(crgn->base(), size); VirtualMemorySummary::record_uncommitted_memory(size, mem_tag()); - return true; // should be done if the list is sorted properly! + return true; // should be done if the list is sorted properly! } prev = head; @@ -256,8 +254,7 @@ void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRe assert(addr != nullptr, "Invalid address"); // split committed regions - LinkedListNode* head = - _committed_regions.head(); + LinkedListNode* head = _committed_regions.head(); LinkedListNode* prev = nullptr; while (head != nullptr) { @@ -281,8 +278,7 @@ void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRe size_t ReservedMemoryRegion::committed_size() const { size_t committed = 0; - LinkedListNode* head = - _committed_regions.head(); + LinkedListNode* head = _committed_regions.head(); while (head != nullptr) { committed += head->data()->size(); head = head->next(); @@ -325,24 +321,28 @@ bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) { assert(_reserved_regions == nullptr, "only call once"); if (level >= NMT_summary) { _reserved_regions = new (std::nothrow, mtNMT) - SortedLinkedList(); + SortedLinkedList(); return (_reserved_regions != nullptr); } return true; } bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size, - const NativeCallStack& stack, MemTag mem_tag) { + const NativeCallStack& stack, MemTag mem_tag) { assert(base_addr != nullptr, "Invalid address"); assert(size > 0, "Invalid size"); assert(_reserved_regions != nullptr, "Sanity check"); MemTracker::assert_locked(); - 
ReservedMemoryRegion rgn(base_addr, size, stack, mem_tag); + ReservedMemoryRegion rgn(base_addr, size, stack, mem_tag); ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); - log_debug(nmt)("Add reserved region \'%s\' (" INTPTR_FORMAT ", %zu)", - rgn.mem_tag_name(), p2i(rgn.base()), rgn.size()); + LogTarget(Debug, nmt) target; + if (target.is_enabled()) { + log_debug(nmt)("Add reserved region \'%s\' (" INTPTR_FORMAT ", %zu)", rgn.mem_tag_name(), + p2i(rgn.base()), rgn.size()); + } + if (reserved_rgn == nullptr) { VirtualMemorySummary::record_reserved_memory(size, mem_tag); return _reserved_regions->add(rgn) != nullptr; @@ -367,7 +367,8 @@ bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size, // Overwrite with new region // Release old region - VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->mem_tag()); + VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), + reserved_rgn->mem_tag()); VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->mem_tag()); // Add new region @@ -381,23 +382,34 @@ bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size, // CDS reserves the whole region for mapping CDS archive, then maps each section into the region. // NMT reports CDS as a whole. 
if (reserved_rgn->mem_tag() == mtClassShared) { - log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", %zu)", - reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), reserved_rgn->size()); - assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region"); + LogTarget(Debug, nmt) target; + if (target.is_enabled()) { + log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", %zu)", + reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), + reserved_rgn->size()); + } + assert(reserved_rgn->contain_region(base_addr, size), + "Reserved CDS region should contain this mapping region"); return true; } // Mapped CDS string region. // The string region(s) is part of the java heap. if (reserved_rgn->mem_tag() == mtJavaHeap) { - log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", %zu)", - reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), reserved_rgn->size()); - assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region"); + LogTarget(Debug, nmt) target; + if (target.is_enabled()) { + log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", %zu)", + reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), + reserved_rgn->size()); + } + assert(reserved_rgn->contain_region(base_addr, size), + "Reserved heap region should contain this mapping region"); return true; } if (reserved_rgn->mem_tag() == mtCode) { - assert(reserved_rgn->contain_region(base_addr, size), "Reserved code region should contain this mapping region"); + assert(reserved_rgn->contain_region(base_addr, size), + "Reserved code region should contain this mapping region"); return true; } @@ -405,8 +417,8 @@ bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size, stringStream ss; ss.print_cr("Error: old region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), memory tag %s.\n" " new region: [" INTPTR_FORMAT "-" 
INTPTR_FORMAT "), memory tag %s.", - p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), NMTUtil::tag_to_name(reserved_rgn->mem_tag()), - p2i(base_addr), p2i(base_addr + size), NMTUtil::tag_to_name(mem_tag)); + p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), MemTagFactory::human_readable_name_of(reserved_rgn->mem_tag()), + p2i(base_addr), p2i(base_addr + size), MemTagFactory::human_readable_name_of(mem_tag)); if (MemTracker::tracking_level() == NMT_detail) { ss.print_cr("Existing region allocated from:"); reserved_rgn->call_stack()->print_on(&ss); @@ -432,7 +444,7 @@ void VirtualMemoryTracker::set_reserved_region_type(address addr, size_t size, M assert(reserved_rgn->contain_address(addr), "Containment"); if (reserved_rgn->mem_tag() != mem_tag) { assert(reserved_rgn->mem_tag() == mtNone, "Overwrite memory tag (should be mtNone, is: \"%s\")", - NMTUtil::tag_to_name(reserved_rgn->mem_tag())); + MemTagFactory::human_readable_name_of(reserved_rgn->mem_tag())); reserved_rgn->set_mem_tag(mem_tag); } } @@ -448,15 +460,18 @@ bool VirtualMemoryTracker::add_committed_region(address addr, size_t size, ReservedMemoryRegion rgn(addr, size); ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); - if (reserved_rgn == nullptr) { + LogTarget(Debug, nmt) target; + if (reserved_rgn == nullptr && target.is_enabled()) { log_debug(nmt)("Add committed region \'%s\', No reserved region found for (" INTPTR_FORMAT ", %zu)", rgn.mem_tag_name(), p2i(rgn.base()), rgn.size()); } assert(reserved_rgn != nullptr, "Add committed region, No reserved region found"); assert(reserved_rgn->contain_region(addr, size), "Not completely contained"); bool result = reserved_rgn->add_committed_region(addr, size, stack); - log_debug(nmt)("Add committed region \'%s\'(" INTPTR_FORMAT ", %zu) %s", - reserved_rgn->mem_tag_name(), p2i(rgn.base()), rgn.size(), (result ? 
"Succeeded" : "Failed")); + if (target.is_enabled()) { + log_debug(nmt)("Add committed region \'%s\'(" INTPTR_FORMAT ", %zu) %s", + reserved_rgn->mem_tag_name(), p2i(rgn.base()), rgn.size(), (result ? "Succeeded" : "Failed")); + } return result; } @@ -470,10 +485,14 @@ bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); assert(reserved_rgn != nullptr, "No reserved region (" INTPTR_FORMAT ", %zu)", p2i(addr), size); assert(reserved_rgn->contain_region(addr, size), "Not completely contained"); - const char* type_name = reserved_rgn->mem_tag_name(); // after remove, info is not complete bool result = reserved_rgn->remove_uncommitted_region(addr, size); - log_debug(nmt)("Removed uncommitted region \'%s\' (" INTPTR_FORMAT ", %zu) %s", - type_name, p2i(addr), size, (result ? " Succeeded" : "Failed")); + + LogTarget(Debug, nmt) target; + if (target.is_enabled()) { + const char* type_name = reserved_rgn->mem_tag_name(); // after remove, info is not complete + log_debug(nmt)("Removed uncommitted region \'%s\' (" INTPTR_FORMAT ", %zu) %s", + type_name, p2i(addr), size, (result ? " Succeeded" : "Failed")); + } return result; } @@ -485,16 +504,24 @@ bool VirtualMemoryTracker::remove_released_region(ReservedMemoryRegion* rgn) { // uncommit regions within the released region ReservedMemoryRegion backup(*rgn); bool result = rgn->remove_uncommitted_region(rgn->base(), rgn->size()); - log_debug(nmt)("Remove uncommitted region \'%s\' (" INTPTR_FORMAT ", %zu) %s", - backup.mem_tag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed")); + + LogTarget(Debug, nmt) target; + if (target.is_enabled()) { + log_debug(nmt)("Remove uncommitted region \'%s\' (" INTPTR_FORMAT ", %zu) %s", + backup.mem_tag_name(), p2i(backup.base()), backup.size(), (result ? 
"Succeeded" : "Failed")); + } if (!result) { return false; } VirtualMemorySummary::record_released_memory(rgn->size(), rgn->mem_tag()); result = _reserved_regions->remove(*rgn); - log_debug(nmt)("Removed region \'%s\' (" INTPTR_FORMAT ", %zu) from _reserved_regions %s" , - backup.mem_tag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed")); + + if (target.is_enabled()) { + log_debug(nmt)("Removed region \'%s\' (" INTPTR_FORMAT ", %zu) from _reserved_regions %s", + backup.mem_tag_name(), p2i(backup.base()), backup.size(), + (result ? "Succeeded" : "Failed")); + } return result; } @@ -582,10 +609,15 @@ bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size NativeCallStack original_stack = *reserved_rgn->call_stack(); MemTag original_tag = reserved_rgn->mem_tag(); - const char* name = reserved_rgn->mem_tag_name(); remove_released_region(reserved_rgn); - log_debug(nmt)("Split region \'%s\' (" INTPTR_FORMAT ", %zu) with size %zu", - name, p2i(rgn.base()), rgn.size(), split); + + LogTarget(Debug, nmt) target; + if (target.is_enabled()) { + const char* name = reserved_rgn->mem_tag_name(); + log_debug(nmt)("Split region \'%s\' (" INTPTR_FORMAT ", %zu) with size %zu", name, + p2i(rgn.base()), rgn.size(), split); + } + // Now, create two new regions. 
add_reserved_region(addr, split, original_stack, mem_tag); add_reserved_region(addr + split, size - split, original_stack, split_tag); @@ -634,7 +666,7 @@ class SnapshotThreadStackWalker : public VirtualMemoryWalker { SnapshotThreadStackWalker() {} bool do_allocation_site(const ReservedMemoryRegion* rgn) { - if (MemTracker::NmtVirtualMemoryLocker::is_safe_to_use()) { + if (NmtVirtualMemoryLocker::is_safe_to_use()) { assert_lock_strong(NmtVirtualMemory_lock); } if (rgn->mem_tag() == mtThreadStack) { @@ -677,7 +709,7 @@ void VirtualMemoryTracker::snapshot_thread_stacks() { bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) { assert(_reserved_regions != nullptr, "Sanity check"); - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; // Check that the _reserved_regions haven't been deleted. if (_reserved_regions != nullptr) { LinkedListNode* head = _reserved_regions->head(); @@ -704,7 +736,7 @@ class PrintRegionWalker : public VirtualMemoryWalker { bool do_allocation_site(const ReservedMemoryRegion* rgn) { if (rgn->contain_address(_p)) { _st->print_cr(PTR_FORMAT " in mmap'd memory region [" PTR_FORMAT " - " PTR_FORMAT "], tag %s", - p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), NMTUtil::tag_to_enum_name(rgn->mem_tag())); + p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), MemTagFactory::name_of(rgn->mem_tag())); if (MemTracker::tracking_level() == NMT_detail) { _stackprinter.print_stack(rgn->call_stack()); _st->cr(); diff --git a/src/hotspot/share/nmt/virtualMemoryTracker.hpp b/src/hotspot/share/nmt/virtualMemoryTracker.hpp index 2b3b572257114..8880bd9262d07 100644 --- a/src/hotspot/share/nmt/virtualMemoryTracker.hpp +++ b/src/hotspot/share/nmt/virtualMemoryTracker.hpp @@ -29,11 +29,13 @@ #include "memory/metaspace.hpp" // For MetadataType #include "memory/metaspaceStats.hpp" #include "nmt/allocationSite.hpp" +#include "nmt/contiguousAllocator.hpp" #include "nmt/nmtCommon.hpp" #include 
"runtime/atomic.hpp" #include "utilities/linkedlist.hpp" #include "utilities/nativeCallStack.hpp" #include "utilities/ostream.hpp" +#include "utilities/deferred.hpp" /* * Virtual memory counter @@ -91,11 +93,12 @@ class VirtualMemorySummary; // This class represents a snapshot of virtual memory at a given time. // The latest snapshot is saved in a static area. -class VirtualMemorySnapshot : public ResourceObj { +class VirtualMemorySnapshot { friend class VirtualMemorySummary; private: - VirtualMemory _virtual_memory[mt_number_of_tags]; + using VirtualMemoryArray = NMTStaticArray>; + VirtualMemoryArray _virtual_memory; public: inline VirtualMemory* by_tag(MemTag mem_tag) { @@ -110,7 +113,7 @@ class VirtualMemorySnapshot : public ResourceObj { inline size_t total_reserved() const { size_t amount = 0; - for (int index = 0; index < mt_number_of_tags; index ++) { + for (int index = 0; index < MemTagFactory::number_of_tags(); index ++) { amount += _virtual_memory[index].reserved(); } return amount; @@ -118,14 +121,14 @@ class VirtualMemorySnapshot : public ResourceObj { inline size_t total_committed() const { size_t amount = 0; - for (int index = 0; index < mt_number_of_tags; index ++) { + for (int index = 0; index < MemTagFactory::number_of_tags(); index ++) { amount += _virtual_memory[index].committed(); } return amount; } void copy_to(VirtualMemorySnapshot* s) { - for (int index = 0; index < mt_number_of_tags; index ++) { + for (int index = 0; index < MemTagFactory::number_of_tags(); index ++) { s->_virtual_memory[index] = _virtual_memory[index]; } } @@ -133,7 +136,6 @@ class VirtualMemorySnapshot : public ResourceObj { class VirtualMemorySummary : AllStatic { public: - static inline void record_reserved_memory(size_t size, MemTag mem_tag) { as_snapshot()->by_tag(mem_tag)->reserve_memory(size); } @@ -167,11 +169,16 @@ class VirtualMemorySummary : AllStatic { static void snapshot(VirtualMemorySnapshot* s); static VirtualMemorySnapshot* as_snapshot() { - return 
&_snapshot; + return _snapshot.get(); + } + + static bool initialize() { + _snapshot.initialize(); + return true; + } private: - static VirtualMemorySnapshot _snapshot; + static Deferred _snapshot; }; @@ -350,7 +357,7 @@ class ReservedMemoryRegion : public VirtualMemoryRegion { return *this; } - const char* mem_tag_name() const { return NMTUtil::tag_to_name(_mem_tag); } + const char* mem_tag_name() const { return MemTagFactory::name_of(_mem_tag); } private: // The committed region contains the uncommitted region, subtract the uncommitted diff --git a/src/hotspot/share/nmt/vmatree.cpp b/src/hotspot/share/nmt/vmatree.cpp index 3352a6e5cd44e..16e316cf09d16 100644 --- a/src/hotspot/share/nmt/vmatree.cpp +++ b/src/hotspot/share/nmt/vmatree.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2024, Red Hat Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -37,7 +37,7 @@ const char* VMATree::statetype_strings[3] = { VMATree::SummaryDiff VMATree::register_mapping(position A, position B, StateType state, const RegionData& metadata, bool use_tag_inplace) { assert(!use_tag_inplace || metadata.mem_tag == mtNone, - "If using use_tag_inplace, then the supplied tag should be mtNone, was instead: %s", NMTUtil::tag_to_name(metadata.mem_tag)); + "If using use_tag_inplace, then the supplied tag should be mtNone, was instead: %s", MemTagFactory::human_readable_name_of(metadata.mem_tag)); if (A == B) { // A 0-sized mapping isn't worth recording.
return SummaryDiff(); @@ -219,7 +219,7 @@ VMATree::SummaryDiff VMATree::register_mapping(position A, position B, StateType #ifdef ASSERT void VMATree::print_on(outputStream* out) { visit_in_order([&](TreapNode* current) { - out->print("%zu (%s) - %s - ", current->key(), NMTUtil::tag_to_name(out_state(current).mem_tag()), + out->print("%zu (%s) - %s - ", current->key(), MemTagFactory::human_readable_name_of(out_state(current).mem_tag()), statetype_to_string(out_state(current).type())); }); out->cr(); @@ -302,11 +302,11 @@ VMATree::SummaryDiff VMATree::set_tag(const position start, const size size, con #ifdef ASSERT void VMATree::SummaryDiff::print_on(outputStream* out) { - for (int i = 0; i < mt_number_of_tags; i++) { + for (int i = 0; i < MemTagFactory::number_of_tags(); i++) { if (tag[i].reserve == 0 && tag[i].commit == 0) { continue; } - out->print_cr("Tag %s R: " INT64_FORMAT " C: " INT64_FORMAT, NMTUtil::tag_to_enum_name((MemTag)i), tag[i].reserve, + out->print_cr("Tag %s R: " INT64_FORMAT " C: " INT64_FORMAT, MemTagFactory::name_of((MemTag)i), tag[i].reserve, tag[i].commit); } } diff --git a/src/hotspot/share/nmt/vmatree.hpp b/src/hotspot/share/nmt/vmatree.hpp index 0c639e929b7d2..5fb02ee6f96a6 100644 --- a/src/hotspot/share/nmt/vmatree.hpp +++ b/src/hotspot/share/nmt/vmatree.hpp @@ -26,6 +26,7 @@ #ifndef SHARE_NMT_VMATREE_HPP #define SHARE_NMT_VMATREE_HPP +#include "memory/allocation.hpp" #include "nmt/memTag.hpp" #include "nmt/nmtNativeCallStackStorage.hpp" #include "nmt/nmtTreap.hpp" @@ -174,15 +175,20 @@ class VMATree { }; struct SummaryDiff { - SingleDiff tag[mt_number_of_tags]; + SingleDiff* tag; SummaryDiff() { - for (int i = 0; i < mt_number_of_tags; i++) { + tag = NEW_C_HEAP_ARRAY(SingleDiff, MemTagFactory::number_of_tags(), mtNMT); + for (int i = 0; i < MemTagFactory::number_of_tags(); i++) { tag[i] = SingleDiff{0, 0}; } } + ~SummaryDiff() { + os::free(tag); + } + void add(SummaryDiff& other) { - for (int i = 0; i < mt_number_of_tags; i++) { + for 
(int i = 0; i < MemTagFactory::number_of_tags(); i++) { tag[i].reserve += other.tag[i].reserve; tag[i].commit += other.tag[i].commit; } diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp index eb57d0f97446e..520e6afa66551 100644 --- a/src/hotspot/share/runtime/mutexLocker.cpp +++ b/src/hotspot/share/runtime/mutexLocker.cpp @@ -138,7 +138,8 @@ Mutex* SharedDecoder_lock = nullptr; Mutex* DCmdFactory_lock = nullptr; Mutex* NMTQuery_lock = nullptr; Mutex* NMTCompilationCostHistory_lock = nullptr; -Mutex* NmtVirtualMemory_lock = nullptr; +Mutex* NmtVirtualMemory_lock = nullptr; +Mutex* NmtMemTag_lock = nullptr; #if INCLUDE_CDS #if INCLUDE_JVMTI @@ -295,6 +296,7 @@ void mutex_init() { MUTEX_DEFN(NMTQuery_lock , PaddedMutex , safepoint); MUTEX_DEFN(NMTCompilationCostHistory_lock , PaddedMutex , nosafepoint); MUTEX_DEFN(NmtVirtualMemory_lock , PaddedMutex , service-4); // Must be lower than G1Mapper_lock used from G1RegionsSmallerThanCommitSizeMapper::commit_regions + MUTEX_DEFN(NmtMemTag_lock , PaddedMutex , event-1); #if INCLUDE_CDS #if INCLUDE_JVMTI MUTEX_DEFN(CDSClassFileStream_lock , PaddedMutex , safepoint); diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp index bc36db00dc3ad..08404c01303eb 100644 --- a/src/hotspot/share/runtime/mutexLocker.hpp +++ b/src/hotspot/share/runtime/mutexLocker.hpp @@ -121,6 +121,7 @@ extern Mutex* DCmdFactory_lock; // serialize access to DCmdFact extern Mutex* NMTQuery_lock; // serialize NMT Dcmd queries extern Mutex* NMTCompilationCostHistory_lock; // guards NMT compilation cost history extern Mutex* NmtVirtualMemory_lock; // guards NMT virtual memory updates +extern Mutex* NmtMemTag_lock; // guards NMT MemTag creation and metadata querying #if INCLUDE_CDS #if INCLUDE_JVMTI extern Mutex* CDSClassFileStream_lock; // FileMapInfo::open_stream_for_jvmti diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp index 
ee1f0a3b08174..722cf68b93d7e 100644 --- a/src/hotspot/share/runtime/os.cpp +++ b/src/hotspot/share/runtime/os.cpp @@ -717,7 +717,7 @@ void* os::realloc(void *memblock, size_t size, MemTag mem_tag, const NativeCallS // may invalidate the old block, including its header. MallocHeader* header = MallocHeader::resolve_checked(memblock); assert(mem_tag == header->mem_tag(), "weird NMT type mismatch (new:\"%s\" != old:\"%s\")\n", - NMTUtil::tag_to_name(mem_tag), NMTUtil::tag_to_name(header->mem_tag())); + MemTagFactory::human_readable_name_of(mem_tag), MemTagFactory::human_readable_name_of(header->mem_tag())); const MallocHeader::FreeInfo free_info = header->free_info(); header->mark_block_as_dead(); @@ -2263,7 +2263,7 @@ bool os::uncommit_memory(char* addr, size_t bytes, bool executable) { assert_nonempty_range(addr, bytes); bool res; if (MemTracker::enabled()) { - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; res = pd_uncommit_memory(addr, bytes, executable); if (res) { MemTracker::record_virtual_memory_uncommit(addr, bytes); @@ -2290,7 +2290,7 @@ bool os::release_memory(char* addr, size_t bytes) { assert_nonempty_range(addr, bytes); bool res; if (MemTracker::enabled()) { - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; res = pd_release_memory(addr, bytes); if (res) { MemTracker::record_virtual_memory_release(addr, bytes); @@ -2375,7 +2375,7 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset, bool os::unmap_memory(char *addr, size_t bytes) { bool result; if (MemTracker::enabled()) { - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; result = pd_unmap_memory(addr, bytes); if (result) { MemTracker::record_virtual_memory_release(addr, bytes); @@ -2414,7 +2414,7 @@ char* os::reserve_memory_special(size_t size, size_t alignment, size_t page_size bool os::release_memory_special(char* addr, size_t bytes) { bool res; if (MemTracker::enabled()) { - MemTracker::NmtVirtualMemoryLocker 
nvml; + NmtVirtualMemoryLocker nvml; res = pd_release_memory_special(addr, bytes); if (res) { MemTracker::record_virtual_memory_release(addr, bytes); diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp index b26ec280e7242..273d260b55db6 100644 --- a/src/hotspot/share/runtime/os.hpp +++ b/src/hotspot/share/runtime/os.hpp @@ -171,6 +171,7 @@ class os: AllStatic { friend class VMStructs; friend class JVMCIVMStructs; friend class MallocTracker; + friend class NMTContiguousAllocator; #ifdef ASSERT private: diff --git a/src/hotspot/share/runtime/threads.cpp b/src/hotspot/share/runtime/threads.cpp index 987db37e9d5ad..9790d988bcc5f 100644 --- a/src/hotspot/share/runtime/threads.cpp +++ b/src/hotspot/share/runtime/threads.cpp @@ -57,6 +57,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "nmt/memTracker.hpp" +#include "nmt/nmtLocker.hpp" #include "oops/instanceKlass.hpp" #include "oops/klass.inline.hpp" #include "oops/oop.inline.hpp" @@ -558,8 +559,9 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { JavaThread* main_thread = new JavaThread(); main_thread->set_thread_state(_thread_in_vm); main_thread->initialize_thread_current(); - // Once mutexes and main_thread are ready, we can use NmtVirtualMemoryLocker. - MemTracker::NmtVirtualMemoryLocker::set_safe_to_use(); + // Once mutexes and main_thread are ready, we can use NmtVirtualMemoryLocker and NmtMemTagLocker. + NmtVirtualMemoryLocker::set_safe_to_use(); + NmtMemTagLocker::set_safe_to_use(); // must do this before set_active_handles main_thread->record_stack_base_and_size(); main_thread->register_thread_stack_with_NMT(); diff --git a/test/hotspot/gtest/nmt/test_memtagarray.cpp b/test/hotspot/gtest/nmt/test_memtagarray.cpp new file mode 100644 index 0000000000000..a73147bb70eee --- /dev/null +++ b/test/hotspot/gtest/nmt/test_memtagarray.cpp @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "nmt/memTag.hpp" +#include "nmt/mallocTracker.hpp" +#include "unittest.hpp" + +class NMTMemTagArrayTest : public testing::Test { +public: + using MTArray = MallocMemorySnapshot::MemTagArray; +}; + +TEST_VM_F(NMTMemTagArrayTest, AllocatingTagTest) { + { // Allocate tags in order + MTArray mta; + ASSERT_TRUE(mta.is_valid()) << "must"; + EXPECT_EQ(0, mta.number_of_tags_allocated()); + for (int i = 0; i < MemTagFactory::number_of_tags(); i++) { + mta.at((MemTag)i); + } + EXPECT_EQ(MemTagFactory::number_of_tags(), mta.number_of_tags_allocated()); + } + + { // Allocating a tag in the middle also allocates all preceding tags. 
+ MTArray mta; + ASSERT_TRUE(mta.is_valid()) << "must"; + EXPECT_EQ(0, (int)mta.number_of_tags_allocated()); + + mta.at(mtMetaspace); + EXPECT_EQ((int)mtMetaspace + 1, (int)mta.number_of_tags_allocated()); + } +} diff --git a/test/hotspot/gtest/nmt/test_nmt_malloclimit.cpp b/test/hotspot/gtest/nmt/test_nmt_malloclimit.cpp index 83d4bd6235be6..99e82ecb35f8c 100644 --- a/test/hotspot/gtest/nmt/test_nmt_malloclimit.cpp +++ b/test/hotspot/gtest/nmt/test_nmt_malloclimit.cpp @@ -41,9 +41,9 @@ static bool compare_limits(const malloclimit* a, const malloclimit* b) { static bool compare_sets(const MallocLimitSet* a, const MallocLimitSet* b) { if (compare_limits(a->global_limit(), b->global_limit())) { - for (int i = 0; i < mt_number_of_tags; i++) { - if (!compare_limits(a->category_limit(NMTUtil::index_to_tag(i)), - b->category_limit(NMTUtil::index_to_tag(i)))) { + for (int i = 0; i < MemTagFactory::number_of_tags(); i++) { + if (!compare_limits(const_cast(a)->category_limit(NMTUtil::index_to_tag(i)), + const_cast(b)->category_limit(NMTUtil::index_to_tag(i)))) { return false; } } @@ -95,11 +95,11 @@ TEST(NMT, MallocLimitPerCategory) { TEST(NMT, MallocLimitCategoryEnumNames) { MallocLimitSet expected; stringStream option; - for (int i = 0; i < mt_number_of_tags; i++) { + for (int i = 0; i < MemTagFactory::number_of_tags(); i++) { MemTag mem_tag = NMTUtil::index_to_tag(i); if (mem_tag != MemTag::mtNone) { expected.set_category_limit(mem_tag, (i + 1) * M, MallocLimitMode::trigger_fatal); - option.print("%s%s:%dM", (i > 0 ? "," : ""), NMTUtil::tag_to_enum_name(mem_tag), i + 1); + option.print("%s%s:%dM", (i > 0 ? 
"," : ""), MemTagFactory::name_of(mem_tag), i + 1); } } test(option.base(), expected); @@ -108,11 +108,11 @@ TEST(NMT, MallocLimitCategoryEnumNames) { TEST(NMT, MallocLimitAllCategoriesHaveHumanReadableNames) { MallocLimitSet expected; stringStream option; - for (int i = 0; i < mt_number_of_tags; i++) { + for (int i = 0; i < MemTagFactory::number_of_tags(); i++) { MemTag mem_tag = NMTUtil::index_to_tag(i); if (mem_tag != MemTag::mtNone) { expected.set_category_limit(mem_tag, (i + 1) * M, MallocLimitMode::trigger_fatal); - option.print("%s%s:%dM", (i > 0 ? "," : ""), NMTUtil::tag_to_name(mem_tag), i + 1); + option.print("%s%s:%dM", (i > 0 ? "," : ""), MemTagFactory::human_readable_name_of(mem_tag), i + 1); } } test(option.base(), expected); diff --git a/test/hotspot/gtest/nmt/test_nmt_reserved_region.cpp b/test/hotspot/gtest/nmt/test_nmt_reserved_region.cpp index 2acf54ab4be8a..567463039f2bd 100644 --- a/test/hotspot/gtest/nmt/test_nmt_reserved_region.cpp +++ b/test/hotspot/gtest/nmt/test_nmt_reserved_region.cpp @@ -30,6 +30,9 @@ // Tests the assignment operator of ReservedMemoryRegion TEST_VM(NMT, ReservedRegionCopy) { + if (MemTracker::tracking_level() == NMT_off) { + VirtualMemorySummary::initialize(); + } address dummy1 = (address)0x10000000; NativeCallStack stack1(&dummy1, 1); ReservedMemoryRegion region1(dummy1, os::vm_page_size(), stack1, mtThreadStack); diff --git a/test/hotspot/gtest/nmt/test_nmt_totals.cpp b/test/hotspot/gtest/nmt/test_nmt_totals.cpp index 61c591fa0bbf1..8f07ce5846ba3 100644 --- a/test/hotspot/gtest/nmt/test_nmt_totals.cpp +++ b/test/hotspot/gtest/nmt/test_nmt_totals.cpp @@ -88,7 +88,7 @@ TEST_VM(NMTNumbers, totals) { void* p[NUM_ALLOCS]; for (int i = 0; i < NUM_ALLOCS; i ++) { // spread over categories - int category = i % (mt_number_of_tags - 1); + int category = i % (MemTagFactory::number_of_tags() - 1); p[i] = NEW_C_HEAP_ARRAY(char, ALLOC_SIZE, (MemTag)category); } diff --git a/test/hotspot/gtest/nmt/test_nmtcontigalloc.cpp 
b/test/hotspot/gtest/nmt/test_nmtcontigalloc.cpp new file mode 100644 index 0000000000000..b5d51be8206ac --- /dev/null +++ b/test/hotspot/gtest/nmt/test_nmtcontigalloc.cpp @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "nmt/memTag.hpp" +#include "nmt/contiguousAllocator.hpp" +#include "unittest.hpp" + +TEST_VM(ContiguousAllocatorTest, AllocatingManySmallPiecesShouldSucceed) { + NMTContiguousAllocator nca{os::vm_page_size(), mtTest}; + EXPECT_TRUE(nca.is_reserved()); + + const size_t num_pieces = 1024; + const size_t piece_size = os::vm_page_size() / num_pieces; + char* r[num_pieces]; + for (size_t i = 0; i < num_pieces; i++) { + r[i] = nca.alloc(piece_size); + } + for (size_t i = 0; i < num_pieces; i++) { + if (r[i] == nullptr) { + EXPECT_FALSE(r[i] == nullptr) << "Allocation number " << i << " failed"; + break; + } else { + // Write to each byte, this should not crash. 
+ for (size_t j = 0; j < piece_size; j++) { + *(r[i] + j) = 'a'; + } + } + } +} + +TEST_VM(ContiguousAllocatorTest, AllocatingMoreThanReservedShouldFail) { + NMTContiguousAllocator nca{os::vm_page_size(), mtTest}; + EXPECT_TRUE(nca.is_reserved()); + char* no1 = nca.alloc(os::vm_page_size()); + EXPECT_NE(nullptr, no1); + char* no2 = nca.alloc(1); + EXPECT_EQ(nullptr, no2); +} + +TEST_VM(ContiguousAllocatorTest, CopyingConstructorGivesSeparateMemory) { + NMTContiguousAllocator nca(os::vm_page_size(), mtTest); + NMTContiguousAllocator nca_copy(nca); + char* ncap = nca.alloc(os::vm_page_size()); + char* ncacp = nca_copy.alloc(os::vm_page_size()); + EXPECT_NE(nullptr, ncap); + EXPECT_NE(nullptr, ncacp); + EXPECT_NE(ncap, ncacp); +} + +TEST_VM(ContiguousAllocatorTest, CopyingConstructorCopiesTheMemory) { + NMTContiguousAllocator nca(os::vm_page_size(), mtTest); + char* ncap = nca.alloc(os::vm_page_size()); + strcpy(ncap, "Hello, world"); + NMTContiguousAllocator nca_copy(nca); + char* str = nca_copy.at_offset(0); + EXPECT_EQ(0, strcmp("Hello, world", str)); +} diff --git a/test/hotspot/gtest/nmt/test_vmatree.cpp b/test/hotspot/gtest/nmt/test_vmatree.cpp index 80b5df500621c..c2ff940d34f87 100644 --- a/test/hotspot/gtest/nmt/test_vmatree.cpp +++ b/test/hotspot/gtest/nmt/test_vmatree.cpp @@ -674,7 +674,7 @@ TEST_VM_F(NMTVMATreeTest, TestConsistencyWithSimpleTracker) { tree_diff = tree.release_mapping(start, size); } - for (int j = 0; j < mt_number_of_tags; j++) { + for (int j = 0; j < MemTagFactory::number_of_tags(); j++) { VMATree::SingleDiff td = tree_diff.tag[j]; VMATree::SingleDiff sd = simple_diff.tag[j]; ASSERT_EQ(td.reserve, sd.reserve); @@ -739,4 +739,4 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccountingWhenUseFlagInplace) { diff = tree.uncommit_mapping(0, 50, rd2); EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve); EXPECT_EQ(-50, diff.tag[NMTUtil::tag_to_index(mtTest)].commit); -} \ No newline at end of file +} diff --git 
a/test/hotspot/gtest/runtime/test_virtualMemoryTracker.cpp b/test/hotspot/gtest/runtime/test_virtualMemoryTracker.cpp index 2985dd7438d86..3d1975e407b68 100644 --- a/test/hotspot/gtest/runtime/test_virtualMemoryTracker.cpp +++ b/test/hotspot/gtest/runtime/test_virtualMemoryTracker.cpp @@ -92,7 +92,7 @@ class VirtualMemoryTrackerTest { size_t size = 0x01000000; ReservedSpace rs = MemoryReserver::reserve(size, mtTest); - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; address addr = (address)rs.base(); @@ -168,7 +168,7 @@ class VirtualMemoryTrackerTest { size_t size = 0x01000000; ReservedSpace rs = MemoryReserver::reserve(size, mtTest); - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; address addr = (address)rs.base(); @@ -258,7 +258,7 @@ class VirtualMemoryTrackerTest { size_t size = 0x01000000; ReservedSpace rs = MemoryReserver::reserve(size, mtTest); - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; address addr = (address)rs.base(); @@ -431,7 +431,7 @@ class VirtualMemoryTrackerTest { size_t size = 0x01000000; ReservedSpace rs = MemoryReserver::reserve(size, mtTest); - MemTracker::NmtVirtualMemoryLocker nvml; + NmtVirtualMemoryLocker nvml; address addr = (address)rs.base();