
src/hotspot/share/gc/g1/heapRegion.inline.hpp

rev 56811 : imported patch 8189737-heapregion-remove-space-inheritance

*** 27,119 ****
  #include "gc/g1/g1BlockOffsetTable.inline.hpp"
  #include "gc/g1/g1CollectedHeap.inline.hpp"
  #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
  #include "gc/g1/heapRegion.hpp"
- #include "gc/shared/space.hpp"
  #include "oops/oop.inline.hpp"
  #include "runtime/atomic.hpp"
  #include "runtime/prefetch.inline.hpp"
  #include "utilities/align.hpp"

! inline HeapWord* G1ContiguousSpace::allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_size) {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate >= min_word_size) {
      HeapWord* new_top = obj + want_to_allocate;
      set_top(new_top);
!     assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
      *actual_size = want_to_allocate;
      return obj;
    } else {
      return NULL;
    }
  }

! inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_size) {
    do {
      HeapWord* obj = top();
      size_t available = pointer_delta(end(), obj);
      size_t want_to_allocate = MIN2(available, desired_word_size);
      if (want_to_allocate >= min_word_size) {
        HeapWord* new_top = obj + want_to_allocate;
!       HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
        // result can be one of two:
        //  the old top value: the exchange succeeded
        //  otherwise: the new value of the top is returned.
        if (result == obj) {
!         assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
          *actual_size = want_to_allocate;
          return obj;
        }
      } else {
        return NULL;
      }
    } while (true);
  }

! inline HeapWord* G1ContiguousSpace::allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_size) {
    HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size);
    if (res != NULL) {
      _bot_part.alloc_block(res, *actual_size);
    }
    return res;
  }

! inline HeapWord* G1ContiguousSpace::allocate(size_t word_size) {
    size_t temp;
    return allocate(word_size, word_size, &temp);
  }

! inline HeapWord* G1ContiguousSpace::par_allocate(size_t word_size) {
    size_t temp;
    return par_allocate(word_size, word_size, &temp);
  }

  // Because of the requirement of keeping "_offsets" up to date with the
  // allocations, we sequentialize these with a lock. Therefore, best if
  // this is used for larger LAB allocations only.
! inline HeapWord* G1ContiguousSpace::par_allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_size) {
    MutexLocker x(&_par_alloc_lock);
    return allocate(min_word_size, desired_word_size, actual_size);
  }

! inline HeapWord* G1ContiguousSpace::block_start(const void* p) {
    return _bot_part.block_start(p);
  }

! inline HeapWord*
! G1ContiguousSpace::block_start_const(const void* p) const {
    return _bot_part.block_start_const(p);
  }

  inline bool HeapRegion::is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const {
    HeapWord* addr = (HeapWord*) obj;
--- 27,118 ----
  #include "gc/g1/g1BlockOffsetTable.inline.hpp"
  #include "gc/g1/g1CollectedHeap.inline.hpp"
  #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
  #include "gc/g1/heapRegion.hpp"
  #include "oops/oop.inline.hpp"
  #include "runtime/atomic.hpp"
  #include "runtime/prefetch.inline.hpp"
  #include "utilities/align.hpp"
+ #include "utilities/globalDefinitions.hpp"

! inline HeapWord* HeapRegion::allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_size) {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate >= min_word_size) {
      HeapWord* new_top = obj + want_to_allocate;
      set_top(new_top);
!     assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment");
      *actual_size = want_to_allocate;
      return obj;
    } else {
      return NULL;
    }
  }

! inline HeapWord* HeapRegion::par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_size) {
    do {
      HeapWord* obj = top();
      size_t available = pointer_delta(end(), obj);
      size_t want_to_allocate = MIN2(available, desired_word_size);
      if (want_to_allocate >= min_word_size) {
        HeapWord* new_top = obj + want_to_allocate;
!       HeapWord* result = Atomic::cmpxchg(new_top, &_top, obj);
        // result can be one of two:
        //  the old top value: the exchange succeeded
        //  otherwise: the new value of the top is returned.
        if (result == obj) {
!         assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment");
          *actual_size = want_to_allocate;
          return obj;
        }
      } else {
        return NULL;
      }
    } while (true);
  }

! inline HeapWord* HeapRegion::allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_size) {
    HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size);
    if (res != NULL) {
      _bot_part.alloc_block(res, *actual_size);
    }
    return res;
  }

! inline HeapWord* HeapRegion::allocate(size_t word_size) {
    size_t temp;
    return allocate(word_size, word_size, &temp);
  }

! inline HeapWord* HeapRegion::par_allocate(size_t word_size) {
    size_t temp;
    return par_allocate(word_size, word_size, &temp);
  }

  // Because of the requirement of keeping "_offsets" up to date with the
  // allocations, we sequentialize these with a lock. Therefore, best if
  // this is used for larger LAB allocations only.
! inline HeapWord* HeapRegion::par_allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_size) {
    MutexLocker x(&_par_alloc_lock);
    return allocate(min_word_size, desired_word_size, actual_size);
  }

! inline HeapWord* HeapRegion::block_start(const void* p) {
    return _bot_part.block_start(p);
  }

! inline HeapWord* HeapRegion::block_start_const(const void* p) const {
    return _bot_part.block_start_const(p);
  }

  inline bool HeapRegion::is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const {
    HeapWord* addr = (HeapWord*) obj;
*** 132,143 ****
      *size = obj->size();
    }
    return obj_is_dead;
  }

! inline bool
! HeapRegion::block_is_obj(const HeapWord* p) const {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    if (!this->is_in(p)) {
      assert(is_continues_humongous(), "This case can only happen for humongous regions");
      return (p == humongous_start_region()->bottom());
--- 131,141 ----
      *size = obj->size();
    }
    return obj_is_dead;
  }

! inline bool HeapRegion::block_is_obj(const HeapWord* p) const {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    if (!this->is_in(p)) {
      assert(is_continues_humongous(), "This case can only happen for humongous regions");
      return (p == humongous_start_region()->bottom());
*** 183,193 ****
  }

  inline void HeapRegion::complete_compaction() {
    // Reset space and bot after compaction is complete if needed.
    reset_after_compaction();
!   if (used_region().is_empty()) {
      reset_bot();
    }

    // After a compaction the mark bitmap is invalid, so we must
    // treat all objects as being inside the unmarked area.
--- 181,191 ----
  }

  inline void HeapRegion::complete_compaction() {
    // Reset space and bot after compaction is complete if needed.
    reset_after_compaction();
!   if (is_empty()) {
      reset_bot();
    }

    // After a compaction the mark bitmap is invalid, so we must
    // treat all objects as being inside the unmarked area.
*** 200,210 ****
    }
  }

  template<typename ApplyToMarkedClosure>
  inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) {
!   HeapWord* limit = scan_limit();
    HeapWord* next_addr = bottom();

    while (next_addr < limit) {
      Prefetch::write(next_addr, PrefetchScanIntervalInBytes);
      // This explicit is_marked check is a way to avoid
--- 198,208 ----
    }
  }

  template<typename ApplyToMarkedClosure>
  inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) {
!   HeapWord* limit = top();
    HeapWord* next_addr = bottom();

    while (next_addr < limit) {
      Prefetch::write(next_addr, PrefetchScanIntervalInBytes);
      // This explicit is_marked check is a way to avoid
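
Most of the hunks above are the mechanical move of the allocation methods from G1ContiguousSpace into HeapRegion, but the par_allocate_impl hunk also shows the lock-free bump-pointer scheme G1 uses inside a region: racing threads advance _top with a compare-and-swap and retry when another thread won. Below is a minimal standalone sketch of that pattern, not part of the patch, using std::atomic in place of HotSpot's Atomic::cmpxchg; ToyRegion and its members are illustrative names only.

// Sketch of CAS-based bump-pointer allocation (assumption: plain C++,
// std::atomic stands in for HotSpot's Atomic::cmpxchg on _top).
#include <atomic>
#include <cstddef>
#include <cstdio>

struct ToyRegion {
  char*              _bottom;   // start of the region
  char*              _end;      // one past the last usable byte
  std::atomic<char*> _top;      // current allocation pointer ("top")

  ToyRegion(char* bottom, size_t size)
    : _bottom(bottom), _end(bottom + size), _top(bottom) {}

  // Try to carve out between min_size and desired_size bytes by advancing
  // _top with a compare-and-swap; retry if another thread moved _top first.
  char* par_allocate(size_t min_size, size_t desired_size, size_t* actual_size) {
    while (true) {
      char* obj = _top.load(std::memory_order_relaxed);
      size_t available = static_cast<size_t>(_end - obj);
      size_t want = desired_size < available ? desired_size : available;
      if (want < min_size) {
        return nullptr;                 // not enough space left in this region
      }
      char* new_top = obj + want;
      // On success the range [obj, new_top) belongs to this thread;
      // on failure the loop reloads the current top and retries.
      if (_top.compare_exchange_strong(obj, new_top)) {
        *actual_size = want;
        return obj;
      }
    }
  }
};

int main() {
  static char storage[1024];
  ToyRegion region(storage, sizeof(storage));
  size_t actual = 0;
  char* p = region.par_allocate(64, 128, &actual);
  std::printf("allocated %zu bytes at offset %td\n", actual, p - storage);
  return 0;
}

The serial allocate_impl in the patch skips the CAS and simply calls set_top(), which is why the BOT-updating par_allocate(min, desired, actual) variant takes _par_alloc_lock instead of relying on the CAS path.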