
src/hotspot/share/gc/z/zObjectAllocator.cpp

--- old/src/hotspot/share/gc/z/zObjectAllocator.cpp
+++ new/src/hotspot/share/gc/z/zObjectAllocator.cpp
@@ -46,49 +46,49 @@
 ZObjectAllocator::ZObjectAllocator() :
     _use_per_cpu_shared_small_pages(ZHeuristics::use_per_cpu_shared_small_pages()),
     _used(0),
     _undone(0),
     _shared_medium_page(NULL),
     _shared_small_page(NULL),
     _worker_small_page(NULL) {}
 
 ZPage** ZObjectAllocator::shared_small_page_addr() {
   return _use_per_cpu_shared_small_pages ? _shared_small_page.addr() : _shared_small_page.addr(0);
 }
 
 ZPage* const* ZObjectAllocator::shared_small_page_addr() const {
   return _use_per_cpu_shared_small_pages ? _shared_small_page.addr() : _shared_small_page.addr(0);
 }
 
 ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
   ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
   if (page != NULL) {
     // Increment used bytes
-    Atomic::add(size, _used.addr());
+    Atomic::add(_used.addr(), size);
   }
 
   return page;
 }
 
 void ZObjectAllocator::undo_alloc_page(ZPage* page) {
   // Increment undone bytes
-  Atomic::add(page->size(), _undone.addr());
+  Atomic::add(_undone.addr(), page->size());
 
   ZHeap::heap()->undo_alloc_page(page);
 }
 
 uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,
                                                         uint8_t page_type,
                                                         size_t page_size,
                                                         size_t size,
                                                         ZAllocationFlags flags) {
   uintptr_t addr = 0;
   ZPage* page = Atomic::load_acquire(shared_page);
 
   if (page != NULL) {
     addr = page->alloc_object_atomic(size);
   }
 
   if (addr == 0) {
     // Allocate new page
     ZPage* const new_page = alloc_page(page_type, page_size, flags);
     if (new_page != NULL) {
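The only functional change in this hunk is the argument order of Atomic::add: the destination pointer now comes first and the addend second, so the call reads destination-then-value. A minimal standalone sketch of that calling convention is below; it uses std::atomic as a stand-in for HotSpot's Atomic class (an assumption for illustration only, not the HotSpot implementation), and mirrors Atomic::add's behavior of returning the new value:

```c++
#include <atomic>
#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for Atomic::add after this change:
// destination first, addend second.
namespace Demo {
  template <typename T>
  T add(std::atomic<T>* dest, T add_value) {
    // fetch_add returns the old value; add one addend to get the new value,
    // matching Atomic::add's return convention.
    return dest->fetch_add(add_value, std::memory_order_seq_cst) + add_value;
  }
}

int main() {
  std::atomic<size_t> used{0};

  // New-style call shape, mirroring Atomic::add(_used.addr(), size)
  // in the updated allocator (2M is just an example page size).
  const size_t page_size = size_t(2) * 1024 * 1024;
  const size_t now_used = Demo::add(&used, page_size);

  std::printf("used = %zu\n", now_used);
  return 0;
}
```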

