
src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp

rev 56448 : imported patch 8220310.mut.0
rev 56451 : imported patch 8220310.mut.1-3_kim
rev 56454 : [mq]: 8220310.mut.2-evensplit

*** 1,7 ****
  /*
!  * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
!  * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 21,47 ****
   * questions.
   *
   */
  
  #include "precompiled.hpp"
  #include "gc/g1/g1PageBasedVirtualSpace.hpp"
  #include "gc/shared/workgroup.hpp"
  #include "oops/markWord.hpp"
  #include "oops/oop.inline.hpp"
  #include "runtime/atomic.hpp"
  #include "runtime/os.inline.hpp"
  #include "services/memTracker.hpp"
  #include "utilities/align.hpp"
  #include "utilities/bitMap.inline.hpp"
  
! G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) :
    _low_boundary(NULL), _high_boundary(NULL), _tail_size(0), _page_size(0),
!   _committed(mtGC), _dirty(mtGC), _special(false), _executable(false) {
!   initialize_with_page_size(rs, used_size, page_size);
  }
  
! void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
    guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");
  
    vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
    vmassert(page_size > 0, "Page size must be non-zero.");
--- 21,48 ----
   * questions.
   *
   */
  
  #include "precompiled.hpp"
+ #include "gc/g1/g1NUMA.inline.hpp"
  #include "gc/g1/g1PageBasedVirtualSpace.hpp"
  #include "gc/shared/workgroup.hpp"
  #include "oops/markWord.hpp"
  #include "oops/oop.inline.hpp"
  #include "runtime/atomic.hpp"
  #include "runtime/os.inline.hpp"
  #include "services/memTracker.hpp"
  #include "utilities/align.hpp"
  #include "utilities/bitMap.inline.hpp"
  
! G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size, MemoryType type) :
    _low_boundary(NULL), _high_boundary(NULL), _tail_size(0), _page_size(0),
!   _committed(mtGC), _dirty(mtGC), _special(false), _executable(false), _numa(NULL) {
!   initialize_with_page_size(rs, used_size, page_size, type);
  }
  
! void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size, MemoryType type) {
    guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");
  
    vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
    vmassert(page_size > 0, "Page size must be non-zero.");
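The constructor and initialize_with_page_size() gain a MemoryType parameter so the space knows whether it backs the Java heap. A minimal sketch of how a caller might pass it; the function create_spaces_example and both local variable names are hypothetical and not part of this webrev:

// Hypothetical caller sketch: only a space created with mtJavaHeap opts into
// NUMA placement below; auxiliary GC structures keep passing another
// MemoryType (e.g. mtGC) and behave exactly as before this patch.
void create_spaces_example(ReservedSpace heap_rs, ReservedSpace aux_rs,
                           size_t used_size, size_t page_size) {
  G1PageBasedVirtualSpace heap_space(heap_rs, used_size, page_size, mtJavaHeap);
  G1PageBasedVirtualSpace bitmap_space(aux_rs, used_size, page_size, mtGC);
}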
*** 68,77 ****
--- 69,85 ----
    if (_special) {
      _dirty.initialize(size_in_pages);
    }
  
    _tail_size = used_size % _page_size;
+ 
+   // Set _numa only if:
+   // 1) This space is for java heap.
+   // 2) There are multiple memory nodes because some OSes allow enabling UseNUMA.
+   if (type == mtJavaHeap && G1MemoryNodeManager::mgr()->num_active_nodes() > 1) {
+     _numa = G1NUMA::numa();
+   }
  }
  
  G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
    // This does not release memory it never reserved.
    // Caller must release via rs.release();
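A minimal sketch of the new guard in isolation, assuming the G1MemoryNodeManager and G1NUMA classes introduced elsewhere in this patch series; the helper name should_use_numa is hypothetical and only illustrates the condition checked above:

// Hypothetical helper, not part of this webrev: _numa stays NULL unless the
// space backs the Java heap and the OS reports more than one active memory
// node (i.e. UseNUMA is actually effective on this machine).
static bool should_use_numa(MemoryType type) {
  return type == mtJavaHeap &&
         G1MemoryNodeManager::mgr()->num_active_nodes() > 1;
}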
*** 79,88 ****
--- 87,97 ----
    _high_boundary = NULL;
    _special = false;
    _executable = false;
    _page_size = 0;
    _tail_size = 0;
+   _numa = NULL;
  }
  
  size_t G1PageBasedVirtualSpace::committed_size() const {
    size_t result = _committed.count_one_bits() * _page_size;
    // The last page might not be in full.
*** 205,214 ****
--- 214,229 ----
    } else {
      commit_internal(start_page, end_page);
    }
    _committed.set_range(start_page, end_page);
  
+   if (_numa != NULL) {
+     char* start_addr = page_start(start_page);
+     size_t size_in_bytes = size_in_pages * _page_size;
+     _numa->request_memory_on_node((address)start_addr, size_in_bytes);
+   }
+ 
    return zero_filled;
  }
  
  void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_page) {
    guarantee(start_page < end_page,
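A minimal sketch of the address arithmetic in the new commit-path hook, factored into a hypothetical free function; page_start(), _page_size and request_memory_on_node() are the calls used in the hunk above, everything else is assumed for illustration:

// Hypothetical helper, not part of this webrev: after committing, the whole
// page range (including a possibly partial tail page) is reported to G1NUMA
// so the OS can bind it to the preferred memory node.
static void notify_numa_after_commit(G1NUMA* numa, char* range_start,
                                     size_t pages, size_t page_size) {
  if (numa != NULL) {
    size_t size_in_bytes = pages * page_size;  // whole pages, tail included
    numa->request_memory_on_node((address)range_start, size_in_bytes);
  }
}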