Skip to content

WIP: Native memory tracking support in FFM #25757

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 4 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/hotspot/os/posix/perfMemory_posix.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1085,7 +1085,7 @@ static char* mmap_create_shared(size_t size) {
static void unmap_shared(char* addr, size_t bytes) {
int res;
if (MemTracker::enabled()) {
MemTracker::NmtVirtualMemoryLocker nvml;
NmtVirtualMemoryLocker nvml;
res = ::munmap(addr, bytes);
if (res == 0) {
MemTracker::record_virtual_memory_release(addr, bytes);
Expand Down
2 changes: 1 addition & 1 deletion src/hotspot/os/windows/perfMemory_windows.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1800,7 +1800,7 @@ void PerfMemory::detach(char* addr, size_t bytes) {

if (MemTracker::enabled()) {
// it does not go through os api, the operation has to record from here
MemTracker::NmtVirtualMemoryLocker nvml;
NmtVirtualMemoryLocker nvml;
remove_file_mapping(addr);
MemTracker::record_virtual_memory_release(addr, bytes);
} else {
Expand Down
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/z/zNMT.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ void ZNMT::unreserve(zaddress_unsafe start, size_t size) {
// We are the owner of the reserved memory, and any failure to unreserve
// is fatal, so we don't need to hold a lock while unreserving memory.

MemTracker::NmtVirtualMemoryLocker nvml;
NmtVirtualMemoryLocker nvml;

// The current NMT implementation does not support unreserving a memory
// region that was built up from smaller memory reservations. Workaround
Expand Down
10 changes: 10 additions & 0 deletions src/hotspot/share/include/jvm.h
Original file line number Diff line number Diff line change
Expand Up @@ -394,6 +394,16 @@ JNIEXPORT jobject JNICALL
JVM_NewMultiArray(JNIEnv *env, jclass eltClass, jintArray dim);


/*
 * Native memory allocation via NMT.
 */
// Handle for a named NMT allocation arena — presumably an index into NMT's
// tag table (uint16_t matches MemTag's width); TODO confirm against hotspot.
typedef uint16_t arena_t;
// Presumably registers (or looks up) an arena named `name` and returns its
// handle — verify against the JVM-side implementation.
JNIEXPORT arena_t JNICALL JVM_MakeArena(const char *name);
// malloc/realloc/calloc-style allocations attributed to arena `a`.
JNIEXPORT void *JNICALL JVM_ArenaAlloc(size_t size, arena_t a);
JNIEXPORT void *JNICALL JVM_ArenaRealloc(void *p, size_t size, arena_t a);
JNIEXPORT void *JNICALL JVM_ArenaCalloc(size_t numelems, size_t elemsize, arena_t a);
// NOTE(review): JVM_ArenaFree takes no arena_t, asymmetric with the
// allocators above — confirm the arena is recoverable from the pointer.
JNIEXPORT void JNICALL JVM_ArenaFree(void* ptr);

/*
* Returns the immediate caller class of the native method invoking
* JVM_GetCallerClass. The Method.invoke and other frames due to
Expand Down
2 changes: 1 addition & 1 deletion src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ void JfrNativeMemoryEvent::send_type_events(const Ticks& timestamp) {

NMTUsage* usage = get_usage(timestamp);

for (int index = 0; index < mt_number_of_tags; index ++) {
for (int index = 0; index < MemTagFactory::number_of_tags(); index ++) {
MemTag mem_tag = NMTUtil::index_to_tag(index);
if (mem_tag == mtNone) {
// Skip mtNone since it is not really used.
Expand Down
10 changes: 7 additions & 3 deletions src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -348,10 +348,14 @@ void CompilerTypeConstant::serialize(JfrCheckpointWriter& writer) {
}

// Serializes the NMT memory-tag constant pool for JFR: one (index, name)
// pair per registered memory tag so consumers can decode numeric tag ids.
void NMTTypeConstant::serialize(JfrCheckpointWriter& writer) {
  // Hoisted: number_of_tags() is loop-invariant; the original re-queried the
  // factory on every iteration (and once more for the count).
  const int tag_count = MemTagFactory::number_of_tags();
  writer.write_count(tag_count);
  for (int i = 0; i < tag_count; ++i) {
    writer.write_key(i);
    MemTag mem_tag = NMTUtil::index_to_tag(i);
    // Prefer the human-readable name; fall back to the internal tag name
    // when no human-readable one has been registered.
    const char* name = MemTagFactory::human_readable_name_of(mem_tag);
    if (name == nullptr) {
      name = MemTagFactory::name_of(mem_tag);
    }
    writer.write(name);
  }
}
68 changes: 68 additions & 0 deletions src/hotspot/share/nmt/contiguousAllocator.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "nmt/contiguousAllocator.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/os.hpp"

// Reserves (but does not commit) _size bytes of virtual address space.
// Returns nullptr on failure; a successful reservation is chunk-aligned so
// that page-sized commits in allocate_chunk() line up with the range.
char* NMTContiguousAllocator::reserve_virtual_address_range() {
  char* const base = os::pd_reserve_memory(_size, false);
  assert(base == nullptr || is_aligned(base, _chunk_size), "must be");
  return base;
}

// Bump-pointer allocation of requested_size bytes out of the reserved range,
// committing additional whole pages on demand. Returns nullptr when the
// reserved range is exhausted or committing more memory fails; on the
// failure paths no allocator state is modified.
char* NMTContiguousAllocator::allocate_chunk(size_t requested_size) {
// Where the allocation cursor would land after this request.
char* next_offset = this->_offset + requested_size;

// Request overruns the reserved address range: can never be satisfied.
if (next_offset > _start + this->_size) {
return nullptr;
}

// Fast path: request fits entirely within already-committed memory.
if (next_offset <= _committed_boundary) {
char* addr = _offset;
this->_offset = next_offset;
return addr;
}
// Commit the missing amount of memory in page-sized chunks
// (bytes_available is the committed-but-unallocated tail; rounding up keeps
// _committed_boundary aligned to _chunk_size, the VM page size).
size_t bytes_available = _committed_boundary - _offset;
size_t chunk_size_missing = align_up(requested_size - bytes_available, _chunk_size);

bool success = os::pd_commit_memory(this->_committed_boundary, chunk_size_missing, false);
if (!success) {
// Commit failed (e.g. no memory/swap left); leave the cursor untouched.
return nullptr;
}

this->_committed_boundary += chunk_size_missing;

char* addr = this->_offset;
this->_offset = next_offset;
return addr;
}

// Releases the reserved address range, if this allocator still owns one
// (a failed or moved-out allocator has _start == nullptr).
NMTContiguousAllocator::~NMTContiguousAllocator() {
  if (!is_reserved()) {
    return;
  }
  unreserve();
}
190 changes: 190 additions & 0 deletions src/hotspot/share/nmt/contiguousAllocator.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,190 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "nmt/memTag.hpp"
#include "utilities/align.hpp"
#include "runtime/os.hpp"

#include <limits>
#include <stdlib.h>
#include <type_traits>

#ifndef SHARE_NMT_CONTIGUOUSALLOCATOR_HPP
#define SHARE_NMT_CONTIGUOUSALLOCATOR_HPP

class VirtualMemoryTracker;

// A bump-pointer allocator over a single contiguous reserved address range.
// Pages are committed lazily as allocations cross the committed boundary;
// individual allocations are never freed (the whole range is released at
// destruction). Not thread-safe; callers synchronize externally.
class NMTContiguousAllocator {
  friend class ContiguousAllocatorTestFixture;

  // Defined in contiguousAllocator.cpp.
  char* reserve_virtual_address_range();
  char* allocate_chunk(size_t requested_size);
  bool unreserve() {
    return os::pd_release_memory(_start, _size);
  }

 public:
  MemTag _flag;              // NMT tag the backing memory is accounted under
  size_t _size;              // Reserved size, aligned up to the VM page size
  size_t _chunk_size;        // Commit granularity (the VM page size)
  char* _start;              // Start of memory
  char* _offset;             // Last returned point of allocation
  char* _committed_boundary; // Anything below this is paged in, invariant: is_aligned with VM page size

  NMTContiguousAllocator(size_t size, MemTag flag)
    : _flag(flag), _size(align_up(size, os::vm_page_size())),
      _chunk_size(os::vm_page_size()),
      _start(reserve_virtual_address_range()),
      _offset(_start),
      _committed_boundary(_start) {}

  // Snapshotting copy: reserves a fresh range and replays the source's
  // committed region into it. On any failure the copy is left unreserved
  // (_start == nullptr, _size == 0); callers must check is_reserved().
  NMTContiguousAllocator(const NMTContiguousAllocator& other)
    : _flag(other._flag),
      _size(other._size),
      _chunk_size(os::vm_page_size()),
      _start(reserve_virtual_address_range()),
      _offset(_start),
      _committed_boundary(_start) {
    // Fix: if our own reservation failed, alloc() below would fire its
    // is_reserved() assert; bail out and mark the copy invalid instead.
    if (!is_reserved()) {
      _size = 0;
      return;
    }
    char* alloc_addr = this->alloc(other._committed_boundary - other._start);
    if (alloc_addr == nullptr) {
      unreserve();
      _start = nullptr;
      _size = 0;
      return;
    }
    size_t bytes_allocated = other._offset - other._start;
    memcpy(alloc_addr, other._start, bytes_allocated);
    _offset = _start + bytes_allocated;
  }

  // Fix (rule of five): with a user-provided copy constructor and destructor,
  // the implicitly generated copy assignment would memberwise-copy _start,
  // making two destructors release the same range. Forbid assignment.
  NMTContiguousAllocator& operator=(const NMTContiguousAllocator&) = delete;

  ~NMTContiguousAllocator();

  // Returns `size` bytes from the contiguous range, or nullptr when the
  // range is exhausted or committing more memory failed.
  char* alloc(size_t size) {
    assert(is_reserved(), "must be");
    return allocate_chunk(size);
  }

  size_t size() const { assert(is_reserved(), "must be"); return _size; }
  size_t amount_committed() const { assert(is_reserved(), "must be"); return _committed_boundary - _start; }

  // Translates a byte offset into an address; the offset must lie strictly
  // below the current allocation cursor.
  char* at_offset(size_t offset) {
    assert(is_reserved(), "must be");
    char* loc = _start + offset;
    assert(loc < _offset, "must be");
    return loc;
  }

  bool is_reserved() const {
    return _start != nullptr;
  }

  // Re-reserves the address range after a failed construction. Returns true
  // on success, false if already reserved or reservation failed again.
  bool reserve_memory() {
    if (!is_reserved()) {
      char* addr = reserve_virtual_address_range();
      if (addr != nullptr) {
        this->_start = addr;
        assert(is_aligned(this->_start, this->_chunk_size), "must be");
        this->_offset = _start;
        // Fix: the committed boundary must be rewound to the new range as
        // well; a stale boundary would make allocate_chunk() treat
        // uncommitted pages as already committed.
        this->_committed_boundary = _start;
        return true;
      }
    }
    return false;
  }

  void register_virtual_memory_usage(VirtualMemoryTracker& tracker);
};

// A static array which is backed by a NMTContiguousAllocator.
// The IndexType is used in order to minimize the size of index references to this array.
template<typename T, typename IType>
class NMTStaticArray {
 protected:
  using IndexType = IType;
  using ThisArray = NMTStaticArray<T, IndexType>;
  NMTContiguousAllocator _allocator; // Backing reserved, lazily-committed memory
  IndexType _num_allocated;          // Number of slots default-constructed so far
  // Worst-case reservation: one slot per representable IndexType value.
  const static size_t _max_reserved_size =
    sizeof(T) * static_cast<size_t>(std::numeric_limits<IndexType>::max());

 public:

  // size == 0 reserves room for every representable index (address space
  // only; pages are committed on demand by the allocator).
  NMTStaticArray(size_t size = 0)
    : _allocator(size == 0 ? _max_reserved_size : size, mtNMT),
      _num_allocated(0) {}

  // Snapshotting constructor
  NMTStaticArray(const ThisArray& original)
    : _allocator(original._allocator),
      _num_allocated(original._num_allocated) {}

  // Returns the address of slot `index`, growing the array and
  // default-constructing every new slot on demand. Returns nullptr if the
  // backing allocator cannot provide the memory.
  T* adr_at(IndexType index) {
    if (_num_allocated <= index) {
      // Fix: remember where the new slots begin BEFORE bumping
      // _num_allocated. The original loop started at the already-updated
      // _num_allocated (== index + 1), so it never executed and freshly
      // allocated slots were left uninitialized.
      const IndexType first_new = _num_allocated;
      IndexType number_of_indices_to_allocate = index - _num_allocated + 1;
      char* ret = _allocator.alloc(number_of_indices_to_allocate * sizeof(T));
      if (ret == nullptr) {
        return nullptr;
      }
      _num_allocated += number_of_indices_to_allocate;
      // Initialize the memory
      T* base = reinterpret_cast<T*>(_allocator.at_offset(0));
      for (size_t mm = first_new; mm <= index; mm++) {
        new (&base[mm]) T();
      }
    }
    char* offset = _allocator.at_offset(sizeof(T) * index);
    return (T*)offset;
  }

  // NOTE(review): logically-const lazy growth — this const overload may
  // still allocate via the non-const adr_at.
  const T* adr_at(IndexType index) const {
    return const_cast<ThisArray*>(this)->adr_at((IndexType)index);
  }

  T& operator[](IndexType i) {
    return *adr_at(i);
  }

  const T& operator[](IndexType i) const {
    return *const_cast<ThisArray*>(this)->adr_at(i);
  }

  T& operator[](int i) {
    assert(i <= std::numeric_limits<IndexType>::max(), "must be");
    return *adr_at((IndexType)i);
  }

  const T& operator[](int i) const {
    assert(i <= std::numeric_limits<IndexType>::max(), "must be");
    return *const_cast<ThisArray*>(this)->adr_at((IndexType)i);
  }


  IndexType number_of_tags_allocated() {
    return _num_allocated;
  }

  bool is_valid() {
    return _allocator.is_reserved();
  }
};

#endif // SHARE_NMT_CONTIGUOUSALLOCATOR_HPP
Loading