src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

rev 7474 : imported patch cleanup


  79   // Clear the cached CSet starting regions and time stamps.
  80   // Their validity is dependent on the GC timestamp.
  81   clear_cset_start_regions();
  82 }
  83 
  84 inline void G1CollectedHeap::increment_gc_time_stamp() {
  85   ++_gc_time_stamp;
  86   OrderAccess::fence();
  87 }
  88 
  89 inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  90   _old_set.remove(hr);
  91 }
  92 
  93 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  94   HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
  95   return r != NULL && r->in_collection_set();
  96 }
  97 
  98 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
- 99                                                      unsigned int* gc_count_before_ret,
-100                                                      int* gclocker_retry_count_ret) {
+ 99                                                      uint* gc_count_before_ret,
+100                                                      uint* gclocker_retry_count_ret) {
 101   assert_heap_not_locked_and_not_at_safepoint();
 102   assert(!is_humongous(word_size), "attempt_allocation() should not "
 103          "be called for humongous allocation requests");
 104 
 105   AllocationContext_t context = AllocationContext::current();
 106   HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
 107                                                                                    false /* bot_updates */);
 108   if (result == NULL) {
 109     result = attempt_allocation_slow(word_size,
 110                                      context,
 111                                      gc_count_before_ret,
 112                                      gclocker_retry_count_ret);
 113   }
 114   assert_heap_not_locked();
 115   if (result != NULL) {
 116     dirty_young_block(result, word_size);
 117   }
 118   return result;
 119 }

