
src/hotspot/share/gc/g1/heapRegionRemSet.cpp

rev 47400 : [mq]: cmpxchg_ptr
rev 47404 : [mq]: load_ptr_acquire
rev 47406 : [mq]: assembler_cmpxchg

*** 111,123 ****
        }
      }
  
  public:
  
!   HeapRegion* hr() const {
!     return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
!   }
  
    jint occupied() const {
      // Overkill, but if we ever need it...
      // guarantee(_occupied == _bm.count_one_bits(), "Check");
      return _occupied;
--- 111,121 ----
        }
      }
  
  public:
  
!   HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); }
  
    jint occupied() const {
      // Overkill, but if we ever need it...
      // guarantee(_occupied == _bm.count_one_bits(), "Check");
      return _occupied;
*** 131,141 ****
      _collision_list_next = NULL;
      _occupied = 0;
      _bm.clear();
      // Make sure that the bitmap clearing above has been finished before publishing
      // this PRT to concurrent threads.
!     OrderAccess::release_store_ptr(&_hr, hr);
    }
  
    void add_reference(OopOrNarrowOopStar from) {
      add_reference_work(from, /*parallel*/ true);
    }
--- 129,139 ----
      _collision_list_next = NULL;
      _occupied = 0;
      _bm.clear();
      // Make sure that the bitmap clearing above has been finished before publishing
      // this PRT to concurrent threads.
!     OrderAccess::release_store(&_hr, hr);
    }
  
    void add_reference(OopOrNarrowOopStar from) {
      add_reference_work(from, /*parallel*/ true);
    }
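The two hunks above change the PRT publication pattern from the old _ptr-suffixed OrderAccess calls to the templated load_acquire/release_store overloads, which deduce the pointer type and make the casts unnecessary. Below is a minimal standalone sketch of the same acquire/release publication idiom, written with C++11 std::atomic rather than HotSpot's internal OrderAccess API; the Payload type and all names are illustrative, not from the JDK sources.

    #include <atomic>
    #include <cassert>

    struct Payload {
      int bits[4];
    };

    std::atomic<Payload*> g_published{nullptr};

    // Writer: finish initializing the payload, then publish it with a
    // release store so the initialization is ordered before the pointer
    // becomes visible (analogous to _bm.clear() before release_store(&_hr, hr)).
    void publish(Payload* p) {
      for (int i = 0; i < 4; i++) {
        p->bits[i] = 0;
      }
      g_published.store(p, std::memory_order_release);
    }

    // Reader: an acquire load pairs with the release store, so a thread
    // that observes the pointer also observes the cleared bits
    // (analogous to load_acquire(&_hr)).
    Payload* consume() {
      Payload* p = g_published.load(std::memory_order_acquire);
      if (p != nullptr) {
        assert(p->bits[0] == 0);
      }
      return p;
    }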
*** 180,190 ****
    // linked together using their _next field.
    static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
      while (true) {
        PerRegionTable* fl = _free_list;
        last->set_next(fl);
!       PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
        if (res == fl) {
          return;
        }
      }
      ShouldNotReachHere();
--- 178,188 ----
    // linked together using their _next field.
    static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
      while (true) {
        PerRegionTable* fl = _free_list;
        last->set_next(fl);
!       PerRegionTable* res = Atomic::cmpxchg(prt, &_free_list, fl);
        if (res == fl) {
          return;
        }
      }
      ShouldNotReachHere();
*** 198,209 ****
    static PerRegionTable* alloc(HeapRegion* hr) {
      PerRegionTable* fl = _free_list;
      while (fl != NULL) {
        PerRegionTable* nxt = fl->next();
        PerRegionTable* res =
!         (PerRegionTable*)
!         Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
        if (res == fl) {
          fl->init(hr, true);
          return fl;
        } else {
          fl = _free_list;
--- 196,206 ----
    static PerRegionTable* alloc(HeapRegion* hr) {
      PerRegionTable* fl = _free_list;
      while (fl != NULL) {
        PerRegionTable* nxt = fl->next();
        PerRegionTable* res =
!         Atomic::cmpxchg(nxt, &_free_list, fl);
        if (res == fl) {
          fl->init(hr, true);
          return fl;
        } else {
          fl = _free_list;
--- 196,206 ----
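The two cmpxchg hunks above are the halves of a lock-free LIFO free list (a Treiber stack): bulk_free() pushes a chain of PRTs and alloc() pops one, each retrying until the CAS observes an unchanged head. Here is a minimal standalone sketch of that push/pop loop using std::atomic::compare_exchange_weak instead of HotSpot's Atomic::cmpxchg; the Node type and names are illustrative, not from the JDK sources.

    #include <atomic>

    struct Node {
      Node* next;
    };

    std::atomic<Node*> g_free_list{nullptr};

    // Push a chain [first..last] onto the list, retrying until the head we
    // observed is still the head when we swing it to 'first'
    // (analogous to bulk_free(): last->set_next(fl); cmpxchg(prt, &_free_list, fl)).
    void push_chain(Node* first, Node* last) {
      Node* head = g_free_list.load(std::memory_order_relaxed);
      do {
        last->next = head;
      } while (!g_free_list.compare_exchange_weak(head, first,
                                                  std::memory_order_release,
                                                  std::memory_order_relaxed));
    }

    // Pop one node, or return nullptr if the list is empty. A failed CAS
    // reloads 'head' and retries, like fl = _free_list in alloc().
    // Note: like any bare Treiber stack, this sketch ignores the ABA
    // problem; node reclamation discipline is out of scope here.
    Node* pop() {
      Node* head = g_free_list.load(std::memory_order_acquire);
      while (head != nullptr &&
             !g_free_list.compare_exchange_weak(head, head->next,
                                                std::memory_order_acquire,
                                                std::memory_order_acquire)) {
        // retry with the refreshed head
      }
      return head;
    }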
*** 414,424 ****
        // parsing of the list, if any, may fail to see other entries),
        // the content of the prt must be visible (else for instance
        // some mark bits may not yet seem cleared or a 'later' update
        // performed by a concurrent thread could be undone when the
        // zeroing becomes visible). This requires store ordering.
!       OrderAccess::release_store_ptr((volatile PerRegionTable*)&_fine_grain_regions[ind], prt);
        _n_fine_entries++;
  
        if (G1HRRSUseSparseTable) {
          // Transfer from sparse to fine-grain.
          SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
--- 411,421 ----
        // parsing of the list, if any, may fail to see other entries),
        // the content of the prt must be visible (else for instance
        // some mark bits may not yet seem cleared or a 'later' update
        // performed by a concurrent thread could be undone when the
        // zeroing becomes visible). This requires store ordering.
!       OrderAccess::release_store(&_fine_grain_regions[ind], prt);
        _n_fine_entries++;
  
        if (G1HRRSUseSparseTable) {
          // Transfer from sparse to fine-grain.
          SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
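The comment in this last hunk explains why the store needs release semantics: a concurrent reader that observes the new entry in _fine_grain_regions must also observe the PRT's fully initialized contents. A minimal sketch of that writer/reader pairing over an array of published slots, again in standard C++ with illustrative names (Table and Entry are not the JDK's actual types):

    #include <atomic>
    #include <cstddef>

    struct Entry {
      int occupied;
    };

    struct Table {
      static const size_t kSlots = 16;
      std::atomic<Entry*> slots[kSlots];

      Table() {
        for (size_t i = 0; i < kSlots; i++) {
          slots[i].store(nullptr, std::memory_order_relaxed);
        }
      }

      // Writer: publish a fully initialized entry into its slot
      // (analogous to release_store(&_fine_grain_regions[ind], prt)).
      void publish(size_t ind, Entry* e) {
        e->occupied = 0;  // initialize before publishing
        slots[ind].store(e, std::memory_order_release);
      }

      // Reader: the acquire load guarantees that if the entry is seen,
      // its initialized contents are seen too.
      int occupied_at(size_t ind) {
        Entry* e = slots[ind].load(std::memory_order_acquire);
        return e != nullptr ? e->occupied : 0;
      }
    };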