/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1ALLOCATOR_INLINE_HPP
#define SHARE_GC_G1_G1ALLOCATOR_INLINE_HPP

#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/shared/plab.inline.hpp"
#include "memory/universe.hpp"

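// Return the index of the NUMA node the current thread is running on.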
inline uint G1Allocator::current_node_index() const {
  return _numa->index_of_current_thread();
}

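// Return the mutator allocation region for the given NUMA node index.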
inline MutatorAllocRegion* G1Allocator::mutator_alloc_region(uint node_index) {
  assert(node_index < _num_alloc_regions, "Invalid index: %u", node_index);
  return &_mutator_alloc_regions[node_index];
}

inline SurvivorGCAllocRegion* G1Allocator::survivor_gc_alloc_region(uint node_index) {
  assert(node_index < _num_alloc_regions, "Invalid index: %u", node_index);
  return &_survivor_gc_alloc_regions[node_index];
}

inline OldGCAllocRegion* G1Allocator::old_gc_alloc_region() {
  return &_old_gc_alloc_region;
}

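// Attempt a mutator allocation: try the retained allocation region of the
// current thread's node first, then its active allocation region.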
inline HeapWord* G1Allocator::attempt_allocation(size_t min_word_size,
                                                 size_t desired_word_size,
                                                 size_t* actual_word_size) {
  uint node_index = current_node_index();
  HeapWord* result = mutator_alloc_region(node_index)->attempt_retained_allocation(min_word_size, desired_word_size, actual_word_size);
  if (result != NULL) {
    return result;
  }
  return mutator_alloc_region(node_index)->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
}

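// Slow-path mutator allocation taken with the Heap_lock held; may retire the
// current allocation region and allocate a new one.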
inline HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size) {
  uint node_index = current_node_index();
  HeapWord* result = mutator_alloc_region(node_index)->attempt_allocation_locked(word_size);
  assert(result != NULL || mutator_alloc_region(node_index)->get() == NULL,
         "Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT, p2i(mutator_alloc_region(node_index)->get()));
  return result;
}

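// Last-resort mutator allocation that forces a new allocation region to be
// allocated for the current node.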
inline HeapWord* G1Allocator::attempt_allocation_force(size_t word_size) {
  uint node_index = current_node_index();
  return mutator_alloc_region(node_index)->attempt_allocation_force(word_size);
}

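// Return the PLAB for the given destination attribute and, for young
// destinations, the given NUMA node.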
inline PLAB* G1PLABAllocator::alloc_buffer(G1HeapRegionAttr dest, uint node_index) const {
  assert(dest.is_valid(),
         "Allocation buffer index out of bounds: %s", dest.get_type_str());
  assert(_alloc_buffers[dest.type()] != NULL,
         "Allocation buffer is NULL: %s", dest.get_type_str());
  return alloc_buffer(dest.type(), node_index);
}

inline PLAB* G1PLABAllocator::alloc_buffer(region_type_t dest, uint node_index) const {
  assert(dest < G1HeapRegionAttr::Num,
         "Allocation buffer index out of bounds: %u", dest);

  if (dest == G1HeapRegionAttr::Young) {
    assert(node_index < alloc_buffers_length(dest),
           "Allocation buffer index out of bounds: %u, %u", dest, node_index);
    return _alloc_buffers[dest][node_index];
  } else {
    return _alloc_buffers[dest][0];
  }
}

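// Number of allocation buffers for the given destination: one per NUMA node
// for young regions, a single buffer otherwise.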
inline uint G1PLABAllocator::alloc_buffers_length(region_type_t dest) const {
  if (dest == G1HeapRegionAttr::Young) {
    return _allocator->num_nodes();
  } else {
    return 1;
  }
}

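// Allocate word_sz words from the PLAB for the given destination, using the
// survivor alignment when allocating into a young (survivor) PLAB.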
inline HeapWord* G1PLABAllocator::plab_allocate(G1HeapRegionAttr dest,
                                                size_t word_sz,
                                                uint node_index) {
  PLAB* buffer = alloc_buffer(dest, node_index);
  if (_survivor_alignment_bytes == 0 || !dest.is_young()) {
    return buffer->allocate(word_sz);
  } else {
    return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
  }
}

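// Allocate from the PLAB first; on failure fall back to either a direct
// allocation or a PLAB refill (allocate_direct_or_new_plab).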
inline HeapWord* G1PLABAllocator::allocate(G1HeapRegionAttr dest,
                                           size_t word_sz,
                                           bool* refill_failed,
                                           uint node_index) {
  HeapWord* const obj = plab_allocate(dest, word_sz, node_index);
  if (obj != NULL) {
    return obj;
  }
  return allocate_direct_or_new_plab(dest, word_sz, refill_failed, node_index);
}

// Create the map used to identify archive objects.
inline void G1ArchiveAllocator::enable_archive_object_check() {
  if (_archive_check_enabled) {
    return;
  }

  _archive_check_enabled = true;
  _archive_region_map.initialize(G1CollectedHeap::heap()->reserved(),
                                 HeapRegion::GrainBytes);
}

// Set the regions containing the specified address range as archive.
inline void G1ArchiveAllocator::set_range_archive(MemRegion range, bool open) {
  assert(_archive_check_enabled, "archive range check not enabled");
  log_info(gc, cds)("Mark %s archive regions in map: [" PTR_FORMAT ", " PTR_FORMAT "]",
                     open ? "open" : "closed",
                     p2i(range.start()),
                     p2i(range.last()));
  uint8_t const value = open ? G1ArchiveRegionMap::OpenArchive : G1ArchiveRegionMap::ClosedArchive;
  _archive_region_map.set_by_address(range, value);
}

// Clear the archive regions map containing the specified address range.
inline void G1ArchiveAllocator::clear_range_archive(MemRegion range) {
  assert(_archive_check_enabled, "archive range check not enabled");
  log_info(gc, cds)("Clear archive regions in map: [" PTR_FORMAT ", " PTR_FORMAT "]",
                    p2i(range.start()),
                    p2i(range.last()));
  _archive_region_map.set_by_address(range, G1ArchiveRegionMap::NoArchive);
}

// Check if an object is in a closed archive region using the _archive_region_map.
inline bool G1ArchiveAllocator::in_closed_archive_range(oop object) {
  return _archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) == G1ArchiveRegionMap::ClosedArchive;
}

inline bool G1ArchiveAllocator::in_open_archive_range(oop object) {
  return _archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) == G1ArchiveRegionMap::OpenArchive;
}

// Check if archive object checking is enabled, to avoid calling in_open/closed_archive_range
// unnecessarily.
inline bool G1ArchiveAllocator::archive_check_enabled() {
  return _archive_check_enabled;
}

inline bool G1ArchiveAllocator::is_closed_archive_object(oop object) {
  return (archive_check_enabled() && in_closed_archive_range(object));
}

inline bool G1ArchiveAllocator::is_open_archive_object(oop object) {
  return (archive_check_enabled() && in_open_archive_range(object));
}

inline bool G1ArchiveAllocator::is_archived_object(oop object) {
  return archive_check_enabled() &&
         (_archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) != G1ArchiveRegionMap::NoArchive);
}

#endif // SHARE_GC_G1_G1ALLOCATOR_INLINE_HPP