
src/share/vm/gc/g1/heapRegion.inline.hpp

Old:

 115 inline bool
 116 HeapRegion::block_is_obj(const HeapWord* p) const {
 117   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 118   if (ClassUnloadingWithConcurrentMark) {
 119     return !g1h->is_obj_dead(oop(p), this);
 120   }
 121   return p < top();
 122 }
 123 
 124 inline size_t
 125 HeapRegion::block_size(const HeapWord *addr) const {
 126   if (addr == top()) {
 127     return pointer_delta(end(), addr);
 128   }
 129 
 130   if (block_is_obj(addr)) {
 131     return oop(addr)->size();
 132   }
 133 
 134   assert(ClassUnloadingWithConcurrentMark,
 135       err_msg("All blocks should be objects if G1 Class Unloading isn't used. "
 136               "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
 137               "addr: " PTR_FORMAT,
 138               p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)));
 139 
 140   // Old regions' dead objects may have dead classes.
 141   // We need to find the next live object in some other
 142   // manner than getting the oop size.
 143   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 144   HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
 145       getNextMarkedWordAddress(addr, prev_top_at_mark_start());
 146 
 147   assert(next > addr, "must get the next live object");
 148   return pointer_delta(next, addr);
 149 }
 150 
 151 inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
 152                                                          size_t desired_word_size,
 153                                                          size_t* actual_word_size) {
 154   assert(is_young(), "we can only skip BOT updates on young regions");
 155   return par_allocate_impl(min_word_size, desired_word_size, actual_word_size);
 156 }
 157 
 158 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
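
The updated copy below differs from the one above only in the assert at file lines 134-138: the err_msg(...) wrapper is dropped and the format string plus its arguments are passed to assert directly (see the sketch after the new listing).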
New:

 115 inline bool
 116 HeapRegion::block_is_obj(const HeapWord* p) const {
 117   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 118   if (ClassUnloadingWithConcurrentMark) {
 119     return !g1h->is_obj_dead(oop(p), this);
 120   }
 121   return p < top();
 122 }
 123 
 124 inline size_t
 125 HeapRegion::block_size(const HeapWord *addr) const {
 126   if (addr == top()) {
 127     return pointer_delta(end(), addr);
 128   }
 129 
 130   if (block_is_obj(addr)) {
 131     return oop(addr)->size();
 132   }
 133 
 134   assert(ClassUnloadingWithConcurrentMark,
 135          "All blocks should be objects if G1 Class Unloading isn't used. "
 136          "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
 137          "addr: " PTR_FORMAT,
 138          p2i(bottom()), p2i(top()), p2i(end()), p2i(addr));
 139 
 140   // Old regions' dead objects may have dead classes.
 141   // We need to find the next live object in some other
 142   // manner than getting the oop size.
 143   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 144   HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
 145       getNextMarkedWordAddress(addr, prev_top_at_mark_start());
 146 
 147   assert(next > addr, "must get the next live object");
 148   return pointer_delta(next, addr);
 149 }
 150 
 151 inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
 152                                                          size_t desired_word_size,
 153                                                          size_t* actual_word_size) {
 154   assert(is_young(), "we can only skip BOT updates on young regions");
 155   return par_allocate_impl(min_word_size, desired_word_size, actual_word_size);
 156 }
 157 
 158 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
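
For context: this change appears to track HotSpot's move away from err_msg(...)-wrapped assert messages toward a variadic assert that accepts a printf-style format string and its arguments directly, so call sites no longer build a temporary message object. The sketch below is a minimal, self-contained illustration of that calling convention only, assuming nothing from the HotSpot sources: my_assert is a hypothetical stand-in, and the real assert additionally records file/line and routes through HotSpot's error reporting.

#include <cstdio>
#include <cstdlib>

// Hypothetical variadic assert: on failure, forward the format string
// and arguments to a printf-style reporter, then abort. This mirrors
// only the calling convention adopted in the diff above.
#define my_assert(cond, ...)                                  \
  do {                                                        \
    if (!(cond)) {                                            \
      std::fprintf(stderr, "assert(%s) failed: ", #cond);     \
      std::fprintf(stderr, __VA_ARGS__);                      \
      std::fprintf(stderr, "\n");                             \
      std::abort();                                           \
    }                                                         \
  } while (0)

int main() {
  char region[16];
  char* bottom = region;
  char* top    = region + 8;
  // Old style pre-formatted the message with a wrapper, e.g.
  //   assert(cond, err_msg("addr: " PTR_FORMAT, p2i(addr)));
  // New style passes the format string and arguments directly:
  my_assert(bottom <= top, "bad region [%p, %p)",
            (void*)bottom, (void*)top);
  return 0;
}

In this sketch the message arguments are only evaluated on the failure branch; the main win of the direct form is readability, since call sites read like a plain printf call with no wrapper.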