/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp"
#include "utilities/count_trailing_zeros.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"

OopStorage::BlockEntry::BlockEntry() : _prev(NULL), _next(NULL) {}

OopStorage::BlockEntry::~BlockEntry() {
  assert(_prev == NULL, "deleting attached block");
  assert(_next == NULL, "deleting attached block");
}

OopStorage::BlockList::BlockList(const BlockEntry& (*get_entry)(const Block& block)) :
  _head(NULL), _tail(NULL), _get_entry(get_entry)
{}

OopStorage::BlockList::~BlockList() {
  // ~OopStorage() empties its lists before destroying them.
  assert(_head == NULL, "deleting non-empty block list");
  assert(_tail == NULL, "deleting non-empty block list");
}

void OopStorage::BlockList::push_front(const Block& block) {
  const Block* old = _head;
  if (old == NULL) {
    assert(_tail == NULL, "invariant");
    _head = _tail = &block;
  } else {
    _get_entry(block)._next = old;
    _get_entry(*old)._prev = &block;
    _head = &block;
  }
}

void OopStorage::BlockList::push_back(const Block& block) {
  const Block* old = _tail;
  if (old == NULL) {
    assert(_head == NULL, "invariant");
    _head = _tail = &block;
  } else {
    _get_entry(*old)._next = &block;
    _get_entry(block)._prev = old;
    _tail = &block;
  }
}

void OopStorage::BlockList::unlink(const Block& block) {
  const BlockEntry& block_entry = _get_entry(block);
  const Block* prev_blk = block_entry._prev;
  const Block* next_blk = block_entry._next;
  block_entry._prev = NULL;
  block_entry._next = NULL;
  if ((prev_blk == NULL) && (next_blk == NULL)) {
    assert(_head == &block, "invariant");
    assert(_tail == &block, "invariant");
    _head = _tail = NULL;
  } else if (prev_blk == NULL) {
    assert(_head == &block, "invariant");
    _get_entry(*next_blk)._prev = NULL;
    _head = next_blk;
  } else if (next_blk == NULL) {
    assert(_tail == &block, "invariant");
    _get_entry(*prev_blk)._next = NULL;
    _tail = prev_blk;
  } else {
    _get_entry(*next_blk)._prev = prev_blk;
    _get_entry(*prev_blk)._next = next_blk;
  }
}

// Blocks start with an array of BitsPerWord oop entries.  That array
// is divided into conceptual BytesPerWord sections of BitsPerByte
// entries.  Blocks are allocated aligned on section boundaries, for
// the convenience of mapping from an entry to the containing block;
// see block_for_ptr().  Aligning on section boundary rather than on
// the full _data wastes a lot less space, but makes for a bit more
// work in block_for_ptr().

const unsigned section_size = BitsPerByte;
const unsigned section_count = BytesPerWord;
const unsigned block_alignment = sizeof(oop) * section_size;

// VS2013 warns (C4351) that elements of _data will be *correctly* default
// initialized, unlike earlier versions that *incorrectly* did not do so.
#ifdef _WINDOWS
#pragma warning(push)
#pragma warning(disable: 4351)
#endif // _WINDOWS
OopStorage::Block::Block(const OopStorage* owner, void* memory) :
  _data(),
  _allocated_bitmask(0),
  _owner(owner),
  _memory(memory),
  _active_entry(),
  _allocate_entry(),
  _deferred_updates_next(NULL),
  _release_refcount(0)
{
  STATIC_ASSERT(_data_pos == 0);
  STATIC_ASSERT(section_size * section_count == ARRAY_SIZE(_data));
  assert(offset_of(Block, _data) == _data_pos, "invariant");
  assert(owner != NULL, "NULL owner");
  assert(is_aligned(this, block_alignment), "misaligned block");
}
#ifdef _WINDOWS
#pragma warning(pop)
#endif

OopStorage::Block::~Block() {
  assert(_release_refcount == 0, "deleting block while releasing");
  assert(_deferred_updates_next == NULL, "deleting block with deferred update");
  // Clear fields used by block_for_ptr and entry validation, which
  // might help catch bugs.  Volatile to prevent dead-store elimination.
  const_cast<uintx volatile&>(_allocated_bitmask) = 0;
  const_cast<OopStorage* volatile&>(_owner) = NULL;
}

const OopStorage::BlockEntry& OopStorage::Block::get_active_entry(const Block& block) {
  return block._active_entry;
}

const OopStorage::BlockEntry& OopStorage::Block::get_allocate_entry(const Block& block) {
  return block._allocate_entry;
}

size_t OopStorage::Block::allocation_size() {
  // _data must be first member, so aligning Block aligns _data.
  STATIC_ASSERT(_data_pos == 0);
  return sizeof(Block) + block_alignment - sizeof(void*);
}

size_t OopStorage::Block::allocation_alignment_shift() {
  return exact_log2(block_alignment);
}

inline bool is_full_bitmask(uintx bitmask) { return ~bitmask == 0; }
inline bool is_empty_bitmask(uintx bitmask) { return bitmask == 0; }

bool OopStorage::Block::is_full() const {
  return is_full_bitmask(allocated_bitmask());
}

bool OopStorage::Block::is_empty() const {
  return is_empty_bitmask(allocated_bitmask());
}

uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
  return bitmask_for_index(get_index(ptr));
}

// A block is deletable if
// (1) It is empty.
// (2) There is not a release() operation currently operating on it.
// (3) It is not in the deferred updates list.
// The order of tests is important for proper interaction between release()
// and concurrent deletion.
bool OopStorage::Block::is_deletable() const {
  return (OrderAccess::load_acquire(&_allocated_bitmask) == 0) &&
         (OrderAccess::load_acquire(&_release_refcount) == 0) &&
         (OrderAccess::load_acquire(&_deferred_updates_next) == NULL);
}

OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
  return _deferred_updates_next;
}

void OopStorage::Block::set_deferred_updates_next(Block* block) {
  _deferred_updates_next = block;
}

bool OopStorage::Block::contains(const oop* ptr) const {
  const oop* base = get_pointer(0);
  return (base <= ptr) && (ptr < (base + ARRAY_SIZE(_data)));
}

unsigned OopStorage::Block::get_index(const oop* ptr) const {
  assert(contains(ptr), PTR_FORMAT " not in block " PTR_FORMAT, p2i(ptr), p2i(this));
  return static_cast<unsigned>(ptr - get_pointer(0));
}

oop* OopStorage::Block::allocate() {
  // Use CAS loop because release may change bitmask outside of lock.
  uintx allocated = allocated_bitmask();
  while (true) {
    assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
    unsigned index = count_trailing_zeros(~allocated);
    uintx new_value = allocated | bitmask_for_index(index);
    uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, allocated);
    if (fetched == allocated) {
      return get_pointer(index); // CAS succeeded; return entry for index.
    }
    allocated = fetched;         // CAS failed; retry with latest value.
  }
}

OopStorage::Block* OopStorage::Block::new_block(const OopStorage* owner) {
  // _data must be first member: aligning block => aligning _data.
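  // allocation_size() over-allocates by (block_alignment - sizeof(void*))
  // bytes, so wherever align_up() places block_mem below, an aligned Block
  // still fits within the raw allocation; the assert below checks this.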
  STATIC_ASSERT(_data_pos == 0);
  size_t size_needed = allocation_size();
  void* memory = NEW_C_HEAP_ARRAY_RETURN_NULL(char, size_needed, mtGC);
  if (memory == NULL) {
    return NULL;
  }
  void* block_mem = align_up(memory, block_alignment);
  assert(sizeof(Block) + pointer_delta(block_mem, memory, 1) <= size_needed,
         "allocated insufficient space for aligned block");
  return ::new (block_mem) Block(owner, memory);
}

void OopStorage::Block::delete_block(const Block& block) {
  void* memory = block._memory;
  block.Block::~Block();
  FREE_C_HEAP_ARRAY(char, memory);
}

// This can return a false positive if ptr is not contained by some
// block.  For some uses, it is a precondition that ptr is valid,
// e.g. contained in some block in owner's _active_list.  Other uses
// require additional validation of the result.
OopStorage::Block*
OopStorage::Block::block_for_ptr(const OopStorage* owner, const oop* ptr) {
  assert(CanUseSafeFetchN(), "precondition");
  STATIC_ASSERT(_data_pos == 0);
  // Const-ness of ptr is not related to const-ness of containing block.
  // Blocks are allocated section-aligned, so get the containing section.
  oop* section_start = align_down(const_cast<oop*>(ptr), block_alignment);
  // Start with a guess that the containing section is the last section,
  // so the block starts section_count-1 sections earlier.
  oop* section = section_start - (section_size * (section_count - 1));
  // Walk up through the potential block start positions, looking for
  // the owner in the expected location.  If we're below the actual block
  // start position, the value at the owner position will be some oop
  // (possibly NULL), which can never match the owner.
  intptr_t owner_addr = reinterpret_cast<intptr_t>(owner);
  for (unsigned i = 0; i < section_count; ++i, section += section_size) {
    Block* candidate = reinterpret_cast<Block*>(section);
    intptr_t* candidate_owner_addr
      = reinterpret_cast<intptr_t*>(&candidate->_owner);
    if (SafeFetchN(candidate_owner_addr, 0) == owner_addr) {
      return candidate;
    }
  }
  return NULL;
}

//////////////////////////////////////////////////////////////////////////////
// Allocation
//
// Allocation involves the _allocate_list, which contains a subset of the
// blocks owned by a storage object.  This is a doubly-linked list, linked
// through dedicated fields in the blocks.  Full blocks are removed from this
// list, though they are still present in the _active_list.  Empty blocks are
// kept at the end of the _allocate_list, to make it easy for empty block
// deletion to find them.
//
// allocate(), and delete_empty_blocks_concurrent() lock the
// _allocate_mutex while performing any list modifications.
//
// allocate() and release() update a block's _allocated_bitmask using CAS
// loops.  This prevents loss of updates even though release() performs
// its updates without any locking.
//
// allocate() obtains the entry from the first block in the _allocate_list,
// and updates that block's _allocated_bitmask to indicate the entry is in
// use.  If this makes the block full (all entries in use), the block is
// removed from the _allocate_list so it won't be considered by future
// allocations until some entries in it are released.
//
// release() is performed lock-free.  release() first looks up the block for
// the entry, using address alignment to find the enclosing block (thereby
// avoiding iteration over the _active_list).  Once the block has been
// determined, its _allocated_bitmask needs to be updated, and its position in
// the _allocate_list may need to be updated.  There are two cases:
//
// (a) If the block is neither full nor would become empty with the release of
// the entry, only its _allocated_bitmask needs to be updated.  But if the CAS
// update fails, the applicable case may change for the retry.
//
// (b) Otherwise, the _allocate_list also needs to be modified.  This requires
// locking the _allocate_mutex.  To keep the release() operation lock-free,
// rather than updating the _allocate_list itself, it instead performs a
// lock-free push of the block onto the _deferred_updates list.  Entries on
// that list are processed by allocate() and delete_empty_blocks_XXX(), while
// they already hold the necessary lock.  That processing makes the block's
// list state consistent with its current _allocated_bitmask.  The block is
// added to the _allocate_list if not already present and the bitmask is not
// full.  The block is moved to the end of the _allocate_list if the bitmask
// is empty, for ease of empty block deletion processing.

oop* OopStorage::allocate() {
  MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
  // Do some deferred update processing every time we allocate.
  // Continue processing deferred updates if _allocate_list is empty,
  // in the hope that we'll get a block from that, rather than
  // allocating a new block.
  while (reduce_deferred_updates() && (_allocate_list.head() == NULL)) {}

  // Use the first block in _allocate_list for the allocation.
  Block* block = _allocate_list.head();
  if (block == NULL) {
    // No available blocks; make a new one, and add to storage.
    {
      MutexUnlockerEx mul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
      block = Block::new_block(this);
    }
    if (block == NULL) {
      while (_allocate_list.head() == NULL) {
        if (!reduce_deferred_updates()) {
          // Failed to make new block, no other thread made a block
          // available while the mutex was released, and didn't get
          // one from a deferred update either, so return failure.
          log_info(oopstorage, ref)("%s: failed allocation", name());
          return NULL;
        }
      }
    } else {
      // Add new block to storage.
      log_info(oopstorage, blocks)("%s: new block " PTR_FORMAT, name(), p2i(block));

      // Add to end of _allocate_list.  The mutex release allowed
      // other threads to add blocks to the _allocate_list.  We prefer
      // to allocate from non-empty blocks, to allow empty blocks to
      // be deleted.
      _allocate_list.push_back(*block);
      // Add to front of _active_list, and then record as the head
      // block, for concurrent iteration protocol.
      _active_list.push_front(*block);
      ++_block_count;
      // Ensure all setup of block is complete before making it visible.
      OrderAccess::release_store(&_active_head, block);
    }
    block = _allocate_list.head();
  }
  // Allocate from first block.
  assert(block != NULL, "invariant");
  assert(!block->is_full(), "invariant");
  if (block->is_empty()) {
    // Transitioning from empty to not empty.
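    // No _allocate_list update is needed for this transition; the block
    // came from _allocate_list.head() and remains on the list.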
    log_debug(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
  }
  oop* result = block->allocate();
  assert(result != NULL, "allocation failed");
  assert(!block->is_empty(), "postcondition");
  Atomic::inc(&_allocation_count); // release updates outside lock.
  if (block->is_full()) {
    // Transitioning from not full to full.
    // Remove full blocks from consideration by future allocates.
    log_debug(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
    _allocate_list.unlink(*block);
  }
  log_info(oopstorage, ref)("%s: allocated " PTR_FORMAT, name(), p2i(result));
  return result;
}

OopStorage::Block* OopStorage::find_block_or_null(const oop* ptr) const {
  assert(ptr != NULL, "precondition");
  return Block::block_for_ptr(this, ptr);
}

static void log_release_transitions(uintx releasing,
                                    uintx old_allocated,
                                    const OopStorage* owner,
                                    const void* block) {
  ResourceMark rm;
  Log(oopstorage, blocks) log;
  LogStream ls(log.debug());
  if (is_full_bitmask(old_allocated)) {
    ls.print_cr("%s: block not full " PTR_FORMAT, owner->name(), p2i(block));
  }
  if (releasing == old_allocated) {
    ls.print_cr("%s: block empty " PTR_FORMAT, owner->name(), p2i(block));
  }
}

void OopStorage::Block::release_entries(uintx releasing, Block* volatile* deferred_list) {
  assert(releasing != 0, "precondition");
  // Prevent empty block deletion when transitioning to empty.
  Atomic::inc(&_release_refcount);

  // Atomically update allocated bitmask.
  uintx old_allocated = _allocated_bitmask;
  while (true) {
    assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
    uintx new_value = old_allocated ^ releasing;
    uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, old_allocated);
    if (fetched == old_allocated) break; // Successful update.
    old_allocated = fetched;             // Retry with updated bitmask.
  }

  // Now that the bitmask has been updated, if we have a state transition
  // (updated bitmask is empty or old bitmask was full), atomically push
  // this block onto the deferred updates list.  Some future call to
  // reduce_deferred_updates will make any needed changes related to this
  // block and _allocate_list.  This deferral avoids list updates and the
  // associated locking here.
  if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
    // Log transitions.  Both transitions are possible in a single update.
    if (log_is_enabled(Debug, oopstorage, blocks)) {
      log_release_transitions(releasing, old_allocated, _owner, this);
    }
    // Attempt to claim responsibility for adding this block to the deferred
    // list, by setting the link to non-NULL by self-looping.  If this fails,
    // then someone else has made such a claim and the deferred update has not
    // yet been processed and will include our change, so we don't need to do
    // anything further.
    if (Atomic::replace_if_null(this, &_deferred_updates_next)) {
      // Successfully claimed.  Push, with self-loop for end-of-list.
      Block* head = *deferred_list;
      while (true) {
        _deferred_updates_next = (head == NULL) ? this : head;
        Block* fetched = Atomic::cmpxchg(this, deferred_list, head);
        if (fetched == head) break; // Successful update.
        head = fetched;             // Retry with updated head.
      }
      log_debug(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
                                    _owner->name(), p2i(this));
    }
  }
  // Release hold on empty block deletion.
  Atomic::dec(&_release_refcount);
}

// Process one available deferred update.  Returns true if one was processed.
bool OopStorage::reduce_deferred_updates() {
  assert_locked_or_safepoint(_allocate_mutex);
  // Atomically pop a block off the list, if any available.
  // No ABA issue because this is only called by one thread at a time.
  // The atomicity is with respect to pushes by release().
  Block* block = OrderAccess::load_acquire(&_deferred_updates);
  while (true) {
    if (block == NULL) return false;
    // Try atomic pop of block from list.
    Block* tail = block->deferred_updates_next();
    if (block == tail) tail = NULL; // Handle self-loop end marker.
    Block* fetched = Atomic::cmpxchg(tail, &_deferred_updates, block);
    if (fetched == block) break; // Update successful.
    block = fetched;             // Retry with updated block.
  }
  block->set_deferred_updates_next(NULL); // Clear tail after updating head.
  // Ensure bitmask read after pop is complete, including clearing tail, for
  // ordering with release().  Without this, we may be processing a stale
  // bitmask state here while blocking a release() operation from recording
  // the deferred update needed for its bitmask change.
  OrderAccess::storeload();
  // Process popped block.
  uintx allocated = block->allocated_bitmask();

  // Make membership in list consistent with bitmask state.
  if ((_allocate_list.ctail() != NULL) &&
      ((_allocate_list.ctail() == block) ||
       (_allocate_list.next(*block) != NULL))) {
    // Block is in the allocate list.
    assert(!is_full_bitmask(allocated), "invariant");
  } else if (!is_full_bitmask(allocated)) {
    // Block is not in the allocate list, but now should be.
    _allocate_list.push_front(*block);
  } // Else block is full and not in list, which is correct.

  // Move empty block to end of list, for possible deletion.
  if (is_empty_bitmask(allocated)) {
    _allocate_list.unlink(*block);
    _allocate_list.push_back(*block);
  }

  log_debug(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
                                name(), p2i(block));
  return true; // Processed one pending update.
}

inline void check_release_entry(const oop* entry) {
  assert(entry != NULL, "Releasing NULL");
  assert(*entry == NULL, "Releasing uncleared entry: " PTR_FORMAT, p2i(entry));
}

void OopStorage::release(const oop* ptr) {
  check_release_entry(ptr);
  Block* block = find_block_or_null(ptr);
  assert(block != NULL, "%s: invalid release " PTR_FORMAT, name(), p2i(ptr));
  log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptr));
  block->release_entries(block->bitmask_for_entry(ptr), &_deferred_updates);
  Atomic::dec(&_allocation_count);
}

void OopStorage::release(const oop* const* ptrs, size_t size) {
  size_t i = 0;
  while (i < size) {
    check_release_entry(ptrs[i]);
    Block* block = find_block_or_null(ptrs[i]);
    assert(block != NULL, "%s: invalid release " PTR_FORMAT, name(), p2i(ptrs[i]));
    log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptrs[i]));
    size_t count = 0;
    uintx releasing = 0;
    for ( ; i < size; ++i) {
      const oop* entry = ptrs[i];
      check_release_entry(entry);
      // If entry not in block, finish block and resume outer loop with entry.
      if (!block->contains(entry)) break;
      // Add entry to releasing bitmap.
      log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(entry));
      uintx entry_bitmask = block->bitmask_for_entry(entry);
      assert((releasing & entry_bitmask) == 0,
             "Duplicate entry: " PTR_FORMAT, p2i(entry));
      releasing |= entry_bitmask;
      ++count;
    }
    // Release the contiguous entries that are in block.
    block->release_entries(releasing, &_deferred_updates);
    Atomic::sub(count, &_allocation_count);
  }
}

const char* dup_name(const char* name) {
  char* dup = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtGC);
  strcpy(dup, name);
  return dup;
}

OopStorage::OopStorage(const char* name,
                       Mutex* allocate_mutex,
                       Mutex* active_mutex) :
  _name(dup_name(name)),
  _active_list(&Block::get_active_entry),
  _allocate_list(&Block::get_allocate_entry),
  _active_head(NULL),
  _deferred_updates(NULL),
  _allocate_mutex(allocate_mutex),
  _active_mutex(active_mutex),
  _allocation_count(0),
  _block_count(0),
  _concurrent_iteration_active(false)
{
  assert(_active_mutex->rank() < _allocate_mutex->rank(),
         "%s: active_mutex must have lower rank than allocate_mutex", _name);
  assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
         "%s: active mutex requires safepoint check", _name);
  assert(_allocate_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
         "%s: allocate mutex requires safepoint check", _name);
}

void OopStorage::delete_empty_block(const Block& block) {
  assert(block.is_empty(), "discarding non-empty block");
  log_info(oopstorage, blocks)("%s: delete empty block " PTR_FORMAT, name(), p2i(&block));
  Block::delete_block(block);
}

OopStorage::~OopStorage() {
  Block* block;
  while ((block = _deferred_updates) != NULL) {
    _deferred_updates = block->deferred_updates_next();
    block->set_deferred_updates_next(NULL);
  }
  while ((block = _allocate_list.head()) != NULL) {
    _allocate_list.unlink(*block);
  }
  while ((block = _active_list.head()) != NULL) {
    _active_list.unlink(*block);
    Block::delete_block(*block);
  }
  FREE_C_HEAP_ARRAY(char, _name);
}

void OopStorage::delete_empty_blocks_safepoint() {
  assert_at_safepoint();
  // Process any pending release updates, which may make more empty
  // blocks available for deletion.
  while (reduce_deferred_updates()) {}
  // Don't interfere with a concurrent iteration.
  if (_concurrent_iteration_active) return;
  // Delete empty (and otherwise deletable) blocks from end of _allocate_list.
  for (const Block* block = _allocate_list.ctail();
       (block != NULL) && block->is_deletable();
       block = _allocate_list.ctail()) {
    _active_list.unlink(*block);
    _allocate_list.unlink(*block);
    delete_empty_block(*block);
    --_block_count;
  }
  // Update _active_head, in case current value was in deleted set.
  _active_head = _active_list.head();
}

void OopStorage::delete_empty_blocks_concurrent() {
  MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
  // Other threads could be adding to the empty block count while we
  // release the mutex across the block deletions.  Set an upper bound
  // on how many blocks we'll try to release, so other threads can't
  // cause an unbounded stay in this function.
  size_t limit = _block_count;

  for (size_t i = 0; i < limit; ++i) {
    // Additional updates might become available while we dropped the
    // lock.  But limit number processed to limit lock duration.
    reduce_deferred_updates();

    const Block* block = _allocate_list.ctail();
    if ((block == NULL) || !block->is_deletable()) {
      // No block to delete, so done.  There could be more pending
      // deferred updates that could give us more work to do; deal with
      // that in some later call, to limit lock duration here.
      return;
    }

    {
      MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
      // Don't interfere with a concurrent iteration.
      if (_concurrent_iteration_active) return;
      // Remove block from _active_list, updating head if needed.
      _active_list.unlink(*block);
      --_block_count;
      if (block == _active_head) {
        _active_head = _active_list.head();
      }
    }
    // Remove block from _allocate_list and delete it.
    _allocate_list.unlink(*block);
    // Release mutex while deleting block.
    MutexUnlockerEx ul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
    delete_empty_block(*block);
  }
}

OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
  const Block* block = find_block_or_null(ptr);
  if (block != NULL) {
    // Verify block is a real block.  For now, simple linear search.
    // Do something more clever if this is a performance bottleneck.
    MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
    for (const Block* check_block = _active_list.chead();
         check_block != NULL;
         check_block = _active_list.next(*check_block)) {
      if (check_block == block) {
        if ((block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0) {
          return ALLOCATED_ENTRY;
        } else {
          return UNALLOCATED_ENTRY;
        }
      }
    }
  }
  return INVALID_ENTRY;
}

size_t OopStorage::allocation_count() const {
  return _allocation_count;
}

size_t OopStorage::block_count() const {
  return _block_count;
}

size_t OopStorage::total_memory_usage() const {
  size_t total_size = sizeof(OopStorage);
  total_size += strlen(name()) + 1;
  total_size += block_count() * Block::allocation_size();
  return total_size;
}

// Parallel iteration support
#if INCLUDE_ALL_GCS

static char* not_started_marker_dummy = NULL;
static void* const not_started_marker = &not_started_marker_dummy;

OopStorage::BasicParState::BasicParState(OopStorage* storage, bool concurrent) :
  _storage(storage),
  _next_block(not_started_marker),
  _concurrent(concurrent)
{
  update_iteration_state(true);
}

OopStorage::BasicParState::~BasicParState() {
  update_iteration_state(false);
}

void OopStorage::BasicParState::update_iteration_state(bool value) {
  if (_concurrent) {
    MutexLockerEx ml(_storage->_active_mutex, Mutex::_no_safepoint_check_flag);
    assert(_storage->_concurrent_iteration_active != value, "precondition");
    _storage->_concurrent_iteration_active = value;
  }
}

void OopStorage::BasicParState::ensure_iteration_started() {
  if (!_concurrent) {
    assert_at_safepoint();
  }
  assert(!_concurrent || _storage->_concurrent_iteration_active, "invariant");
  // Ensure _next_block is not the not_started_marker, setting it to
  // the _active_head to start the iteration if necessary.
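  // Racing threads may all attempt the update; only one cmpxchg replaces
  // the marker, and the others harmlessly fail against the installed value.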
  if (OrderAccess::load_acquire(&_next_block) == not_started_marker) {
    Atomic::cmpxchg(_storage->_active_head, &_next_block, not_started_marker);
  }
  assert(_next_block != not_started_marker, "postcondition");
}

OopStorage::Block* OopStorage::BasicParState::claim_next_block() {
  assert(_next_block != not_started_marker, "Iteration not started");
  void* next = _next_block;
  while (next != NULL) {
    void* new_next = _storage->_active_list.next(*static_cast<Block*>(next));
    void* fetched = Atomic::cmpxchg(new_next, &_next_block, next);
    if (fetched == next) break; // Claimed.
    next = fetched;
  }
  return static_cast<Block*>(next);
}

#endif // INCLUDE_ALL_GCS

const char* OopStorage::name() const { return _name; }

#ifndef PRODUCT

void OopStorage::print_on(outputStream* st) const {
  size_t allocations = _allocation_count;
  size_t blocks = _block_count;

  double data_size = section_size * section_count;
  double alloc_percentage = percent_of((double)allocations, blocks * data_size);

  st->print("%s: " SIZE_FORMAT " entries in " SIZE_FORMAT " blocks (%.F%%), " SIZE_FORMAT " bytes",
            name(), allocations, blocks, alloc_percentage, total_memory_usage());
  if (_concurrent_iteration_active) {
    st->print(", concurrent iteration active");
  }
}

#endif // !PRODUCT