src/hotspot/share/gc/z/zObjectAllocator.cpp

  65     // Increment used bytes
  66     Atomic::add(size, _used.addr());
  67   }
  68 
  69   return page;
  70 }
  71 
  72 void ZObjectAllocator::undo_alloc_page(ZPage* page) {
  73   // Increment undone bytes
  74   Atomic::add(page->size(), _undone.addr());
  75 
  76   ZHeap::heap()->undo_alloc_page(page);
  77 }
  78 
  79 uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,
  80                                                         uint8_t page_type,
  81                                                         size_t page_size,
  82                                                         size_t size,
  83                                                         ZAllocationFlags flags) {
  84   uintptr_t addr = 0;
  85   ZPage* page = OrderAccess::load_acquire(shared_page);
  86 
  87   if (page != NULL) {
  88     addr = page->alloc_object_atomic(size);
  89   }
  90 
  91   if (addr == 0) {
  92     // Allocate new page
  93     ZPage* const new_page = alloc_page(page_type, page_size, flags);
  94     if (new_page != NULL) {
  95       // Allocate object before installing the new page
  96       addr = new_page->alloc_object(size);
  97 
  98     retry:
  99       // Install new page
 100       ZPage* const prev_page = Atomic::cmpxchg(new_page, shared_page, page);
 101       if (prev_page != page) {
 102         if (prev_page == NULL) {
 103           // Previous page was retired, retry installing the new page
 104           page = prev_page;
 105           goto retry;


 287 size_t ZObjectAllocator::used() const {
 288   size_t total_used = 0;
 289   size_t total_undone = 0;
 290 
 291   ZPerCPUConstIterator<size_t> iter_used(&_used);
 292   for (const size_t* cpu_used; iter_used.next(&cpu_used);) {
 293     total_used += *cpu_used;
 294   }
 295 
 296   ZPerCPUConstIterator<size_t> iter_undone(&_undone);
 297   for (const size_t* cpu_undone; iter_undone.next(&cpu_undone);) {
 298     total_undone += *cpu_undone;
 299   }
 300 
 301   return total_used - total_undone;
 302 }
 303 
 304 size_t ZObjectAllocator::remaining() const {
 305   assert(ZThread::is_java(), "Should be a Java thread");
 306 
 307   const ZPage* const page = OrderAccess::load_acquire(shared_small_page_addr());
 308   if (page != NULL) {
 309     return page->remaining();
 310   }
 311 
 312   return 0;
 313 }
 314 
 315 void ZObjectAllocator::retire_pages() {
 316   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 317 
 318   // Reset used and undone bytes
 319   _used.set_all(0);
 320   _undone.set_all(0);
 321 
 322   // Reset allocation pages
 323   _shared_medium_page.set(NULL);
 324   _shared_small_page.set_all(NULL);
 325   _worker_small_page.set_all(NULL);
 326 }
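The two copies of the file printed on this page differ only in how the shared page pointer is read: the copy above uses OrderAccess::load_acquire in alloc_object_in_shared_page() and remaining(), while the copy below uses Atomic::load_acquire. The sketch that follows is a minimal, standalone illustration of the pattern those reads participate in: acquire-load the currently installed shared page, bump-allocate from it atomically, and publish a replacement page with a compare-and-swap when it is missing or full. It uses std::atomic rather than HotSpot's Atomic/OrderAccess API; the Page struct and new_page_from_heap() are hypothetical stand-ins, and the failure handling is simplified (the real code undoes the losing page via undo_alloc_page() and retries in the winner's page), so treat it as an assumption-laden sketch, not the ZGC implementation.

#include <atomic>
#include <cstddef>
#include <cstdint>

struct Page {
  std::atomic<size_t> _top;
  const size_t _size;

  explicit Page(size_t size) : _top(0), _size(size) {}

  // Lock-free bump-pointer allocation; returns 0 when the page is exhausted.
  uintptr_t alloc_object_atomic(size_t size) {
    size_t top = _top.load(std::memory_order_relaxed);
    do {
      if (top + size > _size) {
        return 0;
      }
    } while (!_top.compare_exchange_weak(top, top + size));
    return top + 1;  // pseudo-address; offset shifted so success is non-zero
  }
};

Page* new_page_from_heap(size_t page_size);  // hypothetical page source

uintptr_t alloc_object_in_shared_page(std::atomic<Page*>* shared_page,
                                      size_t page_size,
                                      size_t size) {
  // Acquire-load the installed page; pairs with the release of the
  // installing CAS so the page's fields are visible before its pointer.
  Page* page = shared_page->load(std::memory_order_acquire);

  uintptr_t addr = (page != nullptr) ? page->alloc_object_atomic(size) : 0;

  while (addr == 0) {
    // Shared page missing or full: allocate a new page and try to install it.
    Page* const new_page = new_page_from_heap(page_size);
    if (new_page == nullptr) {
      return 0;  // page allocation failed
    }

    // Allocate the object before publishing the page, as the original does.
    addr = new_page->alloc_object_atomic(size);

    Page* expected = page;
    if (!shared_page->compare_exchange_strong(expected, new_page,
                                              std::memory_order_release,
                                              std::memory_order_acquire)) {
      // Lost the race: another thread installed a page first. The real code
      // undoes new_page (undo_alloc_page) and allocates in the winner's page;
      // this sketch simply retries against the newly observed page and leaks
      // new_page, which is where it stops being faithful to ZGC.
      page = expected;
      addr = (page != nullptr) ? page->alloc_object_atomic(size) : 0;
    }
  }

  return addr;
}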


  65     // Increment used bytes
  66     Atomic::add(size, _used.addr());
  67   }
  68 
  69   return page;
  70 }
  71 
  72 void ZObjectAllocator::undo_alloc_page(ZPage* page) {
  73   // Increment undone bytes
  74   Atomic::add(page->size(), _undone.addr());
  75 
  76   ZHeap::heap()->undo_alloc_page(page);
  77 }
  78 
  79 uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,
  80                                                         uint8_t page_type,
  81                                                         size_t page_size,
  82                                                         size_t size,
  83                                                         ZAllocationFlags flags) {
  84   uintptr_t addr = 0;
  85   ZPage* page = Atomic::load_acquire(shared_page);
  86 
  87   if (page != NULL) {
  88     addr = page->alloc_object_atomic(size);
  89   }
  90 
  91   if (addr == 0) {
  92     // Allocate new page
  93     ZPage* const new_page = alloc_page(page_type, page_size, flags);
  94     if (new_page != NULL) {
  95       // Allocate object before installing the new page
  96       addr = new_page->alloc_object(size);
  97 
  98     retry:
  99       // Install new page
 100       ZPage* const prev_page = Atomic::cmpxchg(new_page, shared_page, page);
 101       if (prev_page != page) {
 102         if (prev_page == NULL) {
 103           // Previous page was retired, retry installing the new page
 104           page = prev_page;
 105           goto retry;


 287 size_t ZObjectAllocator::used() const {
 288   size_t total_used = 0;
 289   size_t total_undone = 0;
 290 
 291   ZPerCPUConstIterator<size_t> iter_used(&_used);
 292   for (const size_t* cpu_used; iter_used.next(&cpu_used);) {
 293     total_used += *cpu_used;
 294   }
 295 
 296   ZPerCPUConstIterator<size_t> iter_undone(&_undone);
 297   for (const size_t* cpu_undone; iter_undone.next(&cpu_undone);) {
 298     total_undone += *cpu_undone;
 299   }
 300 
 301   return total_used - total_undone;
 302 }
 303 
 304 size_t ZObjectAllocator::remaining() const {
 305   assert(ZThread::is_java(), "Should be a Java thread");
 306 
 307   const ZPage* const page = Atomic::load_acquire(shared_small_page_addr());
 308   if (page != NULL) {
 309     return page->remaining();
 310   }
 311 
 312   return 0;
 313 }
 314 
 315 void ZObjectAllocator::retire_pages() {
 316   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 317 
 318   // Reset used and undone bytes
 319   _used.set_all(0);
 320   _undone.set_all(0);
 321 
 322   // Reset allocation pages
 323   _shared_medium_page.set(NULL);
 324   _shared_small_page.set_all(NULL);
 325   _worker_small_page.set_all(NULL);
 326 }
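The remaining hunks in both copies show the per-CPU byte accounting: allocations bump a per-CPU _used counter, undone page allocations bump _undone, used() sums both sets of counters and returns the difference, and retire_pages() zeroes them at a safepoint along with the shared and per-worker page pointers. Below is a minimal sketch of that accounting shape using std::atomic counters in plain vectors; the class name, the explicit cpu index parameter, and the fixed CPU count are assumptions for illustration, not ZGC's ZPerCPU/ZPerWorker machinery.

#include <atomic>
#include <cstddef>
#include <vector>

class ObjectAllocatorAccounting {
private:
  std::vector<std::atomic<size_t>> _used;    // bytes allocated, per CPU
  std::vector<std::atomic<size_t>> _undone;  // bytes undone, per CPU

public:
  explicit ObjectAllocatorAccounting(size_t ncpus)
      : _used(ncpus), _undone(ncpus) {}

  // Mirrors Atomic::add(size, _used.addr()) on the allocating CPU's slot.
  void add_used(size_t cpu, size_t size)   { _used[cpu].fetch_add(size); }

  // Mirrors Atomic::add(page->size(), _undone.addr()) in undo_alloc_page().
  void add_undone(size_t cpu, size_t size) { _undone[cpu].fetch_add(size); }

  // used() sums every CPU's counters and subtracts what was undone,
  // matching the two ZPerCPUConstIterator loops above.
  size_t used() const {
    size_t total_used = 0;
    size_t total_undone = 0;
    for (const auto& u : _used)   { total_used += u.load(); }
    for (const auto& d : _undone) { total_undone += d.load(); }
    return total_used - total_undone;
  }

  // retire_pages() resets both sets of counters, as _used.set_all(0) and
  // _undone.set_all(0) do above; the shared and per-worker page pointers
  // are reset separately in the original.
  void reset() {
    for (auto& u : _used)   { u.store(0); }
    for (auto& d : _undone) { d.store(0); }
  }
};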