--- old/src/cpu/aarch64/vm/aarch64.ad 2016-04-06 17:17:09.230383488 +0200
+++ new/src/cpu/aarch64/vm/aarch64.ad 2016-04-06 17:17:09.118383486 +0200
@@ -5519,7 +5519,7 @@
 operand immByteMapBase()
 %{
   // Get base of card map
-  predicate((jbyte*)n->get_ptr() ==
+  predicate((volatile jbyte*)n->get_ptr() ==
             ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
   match(ConP);
--- old/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp 2016-04-06 17:17:09.778383494 +0200
+++ new/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp 2016-04-06 17:17:09.678383493 +0200
@@ -4053,7 +4053,7 @@
 }
 void MacroAssembler::load_byte_map_base(Register reg) {
-  jbyte *byte_map_base =
+  volatile jbyte *byte_map_base =
     ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base;
   if (is_valid_AArch64_address((address)byte_map_base)) {
--- old/src/cpu/ppc/vm/c1_Runtime1_ppc.cpp 2016-04-06 17:17:10.258383500 +0200
+++ new/src/cpu/ppc/vm/c1_Runtime1_ppc.cpp 2016-04-06 17:17:10.146383498 +0200
@@ -803,7 +803,7 @@
         Register tmp = R0;
         Register addr = R14;
         Register tmp2 = R15;
-        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
+        volatile jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
         Label restart, refill, ret;
--- old/src/cpu/ppc/vm/macroAssembler_ppc.cpp 2016-04-06 17:17:10.758383506 +0200
+++ new/src/cpu/ppc/vm/macroAssembler_ppc.cpp 2016-04-06 17:17:10.658383504 +0200
@@ -2810,7 +2810,7 @@
 }
 // Write the card table byte.
-void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) {
+void MacroAssembler::card_table_write(volatile jbyte* byte_map_base, Register Rtmp, Register Robj) {
   assert_different_registers(Robj, Rtmp, R0);
   load_const_optimized(Rtmp, (address)byte_map_base, R0);
   srdi(Robj, Robj, CardTableModRefBS::card_shift);
--- old/src/cpu/ppc/vm/macroAssembler_ppc.hpp 2016-04-06 17:17:11.246383511 +0200
+++ new/src/cpu/ppc/vm/macroAssembler_ppc.hpp 2016-04-06 17:17:11.142383510 +0200
@@ -572,7 +572,7 @@
   // GC barrier support.
   void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
-  void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);
+  void card_table_write(volatile jbyte* byte_map_base, Register Rtmp, Register Robj);
 #if INCLUDE_ALL_GCS
   // General G1 pre-barrier generator.
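For readers less familiar with the code these platform hunks touch, the following is a minimal illustrative sketch (not HotSpot code; CARD_SHIFT, jbyte_t and mark_card are hypothetical stand-ins) of the byte-map indexing that load_byte_map_base() and card_table_write() implement in assembly: the store address is shifted right by the card shift and a byte is written at byte_map_base plus that index.

  #include <cstdint>

  typedef signed char jbyte_t;          // stand-in for jbyte
  static const int CARD_SHIFT = 9;      // 512-byte cards, as in CardTableModRefBS

  // byte_map_base is pre-biased so that (addr >> CARD_SHIFT) indexes it directly.
  inline void mark_card(volatile jbyte_t* byte_map_base, const void* store_addr) {
    volatile jbyte_t* card = byte_map_base + (uintptr_t(store_addr) >> CARD_SHIFT);
    *card = 0;                          // 0 == dirty_card in HotSpot's encoding
  }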
--- old/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp 2016-04-06 17:17:11.738383517 +0200
+++ new/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp 2016-04-06 17:17:11.606383516 +0200
@@ -918,7 +918,7 @@
         Register cardtable = G5;
         Register tmp  = G1_scratch;
         Register tmp2 = G3_scratch;
-        jbyte* byte_map_base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base;
+        volatile jbyte* byte_map_base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base;
         Label not_already_dirty, restart, refill, young_card;
@@ -928,7 +928,7 @@
         __ srl(addr, CardTableModRefBS::card_shift, addr);
 #endif
-        AddressLiteral rs(byte_map_base);
+        AddressLiteral rs((address)byte_map_base);
         __ set(rs, cardtable);         // cardtable := <card table base>
         __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
--- old/src/cpu/sparc/vm/macroAssembler_sparc.cpp 2016-04-06 17:17:12.286383524 +0200
+++ new/src/cpu/sparc/vm/macroAssembler_sparc.cpp 2016-04-06 17:17:12.158383522 +0200
@@ -755,7 +755,7 @@
   }
 }
-void MacroAssembler::card_table_write(jbyte* byte_map_base,
+void MacroAssembler::card_table_write(volatile jbyte* byte_map_base,
                                       Register tmp, Register obj) {
 #ifdef _LP64
   srlx(obj, CardTableModRefBS::card_shift, obj);
@@ -3810,7 +3810,7 @@
 static u_char* dirty_card_log_enqueue_end = 0;
 // This gets to assume that o0 contains the object address.
-static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
+static void generate_dirty_card_log_enqueue(volatile jbyte* byte_map_base) {
   BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
   CodeBuffer buf(bb);
   MacroAssembler masm(&buf);
@@ -3824,7 +3824,7 @@
 #else
   __ srl(O0, CardTableModRefBS::card_shift, O0);
 #endif
-  AddressLiteral addrlit(byte_map_base);
+  AddressLiteral addrlit((address)byte_map_base);
   __ set(addrlit, O1); // O1 := <card table base>
   __ ldub(O0, O1, O2); // O2 := [O0 + O1]
@@ -3905,7 +3905,7 @@
 }
 static inline void
-generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
+generate_dirty_card_log_enqueue_if_necessary(volatile jbyte* byte_map_base) {
   if (dirty_card_log_enqueue == 0) {
     generate_dirty_card_log_enqueue(byte_map_base);
     assert(dirty_card_log_enqueue != 0, "postcondition.");
@@ -5117,4 +5117,3 @@
   not1(crc);
 }
-
--- old/src/cpu/sparc/vm/macroAssembler_sparc.hpp 2016-04-06 17:17:13.042383533 +0200
+++ new/src/cpu/sparc/vm/macroAssembler_sparc.hpp 2016-04-06 17:17:12.922383531 +0200
@@ -1088,7 +1088,7 @@
   void check_and_forward_exception(Register scratch_reg);
   // Write to card table for - register is destroyed afterwards.
-  void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
+  void card_table_write(volatile jbyte* byte_map_base, Register tmp, Register obj);
   void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
--- old/src/cpu/sparc/vm/stubGenerator_sparc.cpp 2016-04-06 17:17:13.518383538 +0200
+++ new/src/cpu/sparc/vm/stubGenerator_sparc.cpp 2016-04-06 17:17:13.414383537 +0200
@@ -1030,7 +1030,7 @@
       __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
       __ srl_ptr(count, CardTableModRefBS::card_shift, count);
       __ sub(count, addr, count);
-      AddressLiteral rs(ct->byte_map_base);
+      AddressLiteral rs((address)ct->byte_map_base);
       __ set(rs, tmp);
       __ BIND(L_loop);
       __ stb(G0, tmp, addr);
--- old/src/share/vm/c1/c1_LIRGenerator.cpp 2016-04-06 17:17:14.018383544 +0200
+++ new/src/share/vm/c1/c1_LIRGenerator.cpp 2016-04-06 17:17:13.910383543 +0200
@@ -1604,7 +1604,7 @@
 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(_bs);
   assert(sizeof(*(ct->byte_map_base)) == sizeof(jbyte), "adjust this code");
-  LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base);
+  LIR_Const* card_table_base = new LIR_Const((jbyte*)ct->byte_map_base);
   if (addr->is_address()) {
     LIR_Address* address = addr->as_address_ptr();
     // ptr cannot be an object because we use this barrier for array card marks
--- old/src/share/vm/gc/cms/cmsOopClosures.hpp 2016-04-06 17:17:14.562383551 +0200
+++ new/src/share/vm/gc/cms/cmsOopClosures.hpp 2016-04-06 17:17:14.458383549 +0200
@@ -258,16 +258,15 @@
 // the closure ParMarkFromRootsClosure.
 class ParPushOrMarkClosure: public MetadataAwareOopClosure {
  private:
-  CMSCollector* _collector;
-  MemRegion _whole_span;
-  MemRegion _span; // local chunk
-  CMSBitMap* _bit_map;
-  OopTaskQueue* _work_queue;
-  CMSMarkStack* _overflow_stack;
-  HeapWord* const _finger;
-  HeapWord** const _global_finger_addr;
-  ParMarkFromRootsClosure* const
-    _parent;
+  CMSCollector* _collector;
+  MemRegion _whole_span;
+  MemRegion _span; // local chunk
+  CMSBitMap* _bit_map;
+  OopTaskQueue* _work_queue;
+  CMSMarkStack* _overflow_stack;
+  HeapWord* const _finger;
+  HeapWord* volatile* const _global_finger_addr;
+  ParMarkFromRootsClosure* const _parent;
 protected:
   DO_OOP_WORK_DEFN
 public:
@@ -277,7 +276,7 @@
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        HeapWord* finger,
-                       HeapWord** global_finger_addr,
+                       HeapWord* volatile* global_finger_addr,
                        ParMarkFromRootsClosure* parent);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
--- old/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp 2016-04-06 17:17:15.014383556 +0200
+++ new/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp 2016-04-06 17:17:14.914383555 +0200
@@ -2999,14 +2999,14 @@
 // MT Concurrent Marking Task
 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
-  CMSCollector* _collector;
-  uint _n_workers;                // requested/desired # workers
-  bool _result;
-  CompactibleFreeListSpace* _cms_space;
-  char _pad_front[64];            // padding to ...
-  HeapWord* _global_finger;       // ... avoid sharing cache line
-  char _pad_back[64];
-  HeapWord* _restart_addr;
+  CMSCollector* _collector;
+  uint _n_workers;                // requested/desired # workers
+  bool _result;
+  CompactibleFreeListSpace* _cms_space;
+  char _pad_front[64];            // padding to ...
+  HeapWord* volatile _global_finger; // ... avoid sharing cache line
+  char _pad_back[64];
+  HeapWord* _restart_addr;
   // Exposed here for yielding support
   Mutex* const _bit_map_lock;
@@ -3042,7 +3042,7 @@
   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
-  HeapWord** global_finger_addr() { return &_global_finger; }
+  HeapWord* volatile* global_finger_addr() { return &_global_finger; }
   CMSConcMarkingTerminator* terminator() { return &_term; }
@@ -6525,7 +6525,7 @@
   // Note: the local finger doesn't advance while we drain
   // the stack below, but the global finger sure can and will.
-  HeapWord** gfa = _task->global_finger_addr();
+  HeapWord* volatile* gfa = _task->global_finger_addr();
   ParPushOrMarkClosure pushOrMarkClosure(_collector,
                                          _span, _bit_map, _work_queue,
@@ -6692,7 +6692,7 @@
                                        OopTaskQueue* work_queue,
                                        CMSMarkStack* overflow_stack,
                                        HeapWord* finger,
-                                       HeapWord** global_finger_addr,
+                                       HeapWord* volatile* global_finger_addr,
                                        ParMarkFromRootsClosure* parent) :
   MetadataAwareOopClosure(collector->ref_processor()),
   _collector(collector),
--- old/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp 2016-04-06 17:17:15.522383562 +0200
+++ new/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp 2016-04-06 17:17:15.422383561 +0200
@@ -725,18 +725,18 @@
   // Support for parallelizing young gen rescan in CMS remark phase
   ParNewGeneration* _young_gen;
-  HeapWord** _top_addr;             // ... Top of Eden
-  HeapWord** _end_addr;             // ... End of Eden
-  Mutex* _eden_chunk_lock;
-  HeapWord** _eden_chunk_array;     // ... Eden partitioning array
-  size_t _eden_chunk_index;         // ... top (exclusive) of array
-  size_t _eden_chunk_capacity;      // ... max entries in array
+  HeapWord* volatile* _top_addr;    // ... Top of Eden
+  HeapWord** _end_addr;             // ... End of Eden
+  Mutex* _eden_chunk_lock;
+  HeapWord** _eden_chunk_array;     // ... Eden partitioning array
+  size_t _eden_chunk_index;         // ... top (exclusive) of array
+  size_t _eden_chunk_capacity;      // ... max entries in array
   // Support for parallelizing survivor space rescan
-  HeapWord** _survivor_chunk_array;
-  size_t _survivor_chunk_index;
-  size_t _survivor_chunk_capacity;
-  size_t* _cursor;
+  HeapWord** _survivor_chunk_array;
+  size_t _survivor_chunk_index;
+  size_t _survivor_chunk_capacity;
+  size_t* _cursor;
   ChunkArray* _survivor_plab_array;
   // Support for marking stack overflow handling
--- old/src/share/vm/gc/cms/parCardTableModRefBS.cpp 2016-04-06 17:17:16.102383569 +0200
+++ new/src/share/vm/gc/cms/parCardTableModRefBS.cpp 2016-04-06 17:17:16.002383568 +0200
@@ -46,7 +46,7 @@
          "n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads);
   // Make sure the LNC array is valid for the space.
-  jbyte** lowest_non_clean;
+  volatile jbyte** lowest_non_clean;
   uintptr_t lowest_non_clean_base_chunk_index;
   size_t lowest_non_clean_chunk_size;
   get_LNC_array_for_space(sp, lowest_non_clean,
@@ -88,7 +88,7 @@
                                     jint stride, int n_strides,
                                     OopsInGenClosure* cl,
                                     CardTableRS* ct,
-                                    jbyte** lowest_non_clean,
+                                    volatile jbyte** lowest_non_clean,
                                     uintptr_t lowest_non_clean_base_chunk_index,
                                     size_t lowest_non_clean_chunk_size) {
   // We go from higher to lower addresses here; it wouldn't help that much
@@ -96,21 +96,21 @@
   // Find the first card address of the first chunk in the stride that is
   // at least "bottom" of the used region.
-  jbyte* start_card = byte_for(used.start());
-  jbyte* end_card = byte_after(used.last());
-  uintptr_t start_chunk = addr_to_chunk_index(used.start());
+  volatile jbyte* start_card = byte_for(used.start());
+  volatile jbyte* end_card = byte_after(used.last());
+  uintptr_t start_chunk = addr_to_chunk_index(used.start());
   uintptr_t start_chunk_stride_num = start_chunk % n_strides;
-  jbyte* chunk_card_start;
+  volatile jbyte* chunk_card_start;
   if ((uintptr_t)stride >= start_chunk_stride_num) {
-    chunk_card_start = (jbyte*)(start_card +
-                                (stride - start_chunk_stride_num) *
-                                ParGCCardsPerStrideChunk);
+    chunk_card_start = (volatile jbyte*)(start_card +
+                                         (stride - start_chunk_stride_num) *
+                                         ParGCCardsPerStrideChunk);
   } else {
     // Go ahead to the next chunk group boundary, then to the requested stride.
-    chunk_card_start = (jbyte*)(start_card +
-                                (n_strides - start_chunk_stride_num + stride) *
-                                ParGCCardsPerStrideChunk);
+    chunk_card_start = (volatile jbyte*)(start_card +
+                                         (n_strides - start_chunk_stride_num + stride) *
+                                         ParGCCardsPerStrideChunk);
   }
   while (chunk_card_start < end_card) {
@@ -121,11 +121,11 @@
     // by suitably initializing the "min_done" field in process_chunk_boundaries()
     // below, together with the dirty region extension accomplished in
    // DirtyCardToOopClosure::do_MemRegion().
-    jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
+    volatile jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
     // Invariant: chunk_mr should be fully contained within the "used" region.
-    MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
-                                   chunk_card_end >= end_card ?
-                                   used.end() : addr_for(chunk_card_end));
+    MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
+                                   chunk_card_end >= end_card ?
+                                   used.end() : addr_for(chunk_card_end));
     assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
     assert(used.contains(chunk_mr), "chunk_mr should be subset of used");
@@ -167,7 +167,7 @@
                          DirtyCardToOopClosure* dcto_cl,
                          MemRegion chunk_mr,
                          MemRegion used,
-                         jbyte** lowest_non_clean,
+                         volatile jbyte** lowest_non_clean,
                          uintptr_t lowest_non_clean_base_chunk_index,
                          size_t lowest_non_clean_chunk_size) {
@@ -206,17 +206,17 @@
     // does not scan an object straddling the mutual boundary
     // too far to the right, and attempt to scan a portion of
     // that object twice.
-    jbyte* first_dirty_card = NULL;
-    jbyte* last_card_of_first_obj =
        byte_for(first_block + sp->block_size(first_block) - 1);
-    jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
-    jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
-    jbyte* last_card_to_check =
-      (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
-                    (intptr_t) last_card_of_first_obj);
+    volatile jbyte* first_dirty_card = NULL;
+    volatile jbyte* last_card_of_first_obj =
        byte_for(first_block + sp->block_size(first_block) - 1);
+    volatile jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
+    volatile jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
+    volatile jbyte* last_card_to_check =
+      (volatile jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
+                             (intptr_t) last_card_of_first_obj);
     // Note that this does not need to go beyond our last card
     // if our first object completely straddles this chunk.
-    for (jbyte* cur = first_card_of_cur_chunk;
+    for (volatile jbyte* cur = first_card_of_cur_chunk;
          cur <= last_card_to_check; cur++) {
       jbyte val = *cur;
       if (card_will_be_scanned(val)) {
@@ -235,7 +235,7 @@
       // In this case we can help our neighbor by just asking them
       // to stop at our first card (even though it may not be dirty).
       assert(lowest_non_clean[cur_chunk_index] == NULL,
              "Write once : value should be stable hereafter");
-      jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
+      volatile jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
       lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
     }
@@ -260,7 +260,7 @@
     // last_obj_card is the card corresponding to the start of the last object
     // in the chunk.  Note that the last object may not start in
     // the chunk.
-    jbyte* const last_obj_card = byte_for(last_block);
+    volatile jbyte* const last_obj_card = byte_for(last_block);
     const jbyte val = *last_obj_card;
     if (!card_will_be_scanned(val)) {
       assert(!card_may_have_been_dirty(val), "Error");
@@ -272,20 +272,20 @@
       // The last object must be considered dirty, and extends onto the
       // following chunk.  Look for a dirty card in that chunk that will
       // bound our processing.
-      jbyte* limit_card = NULL;
+      volatile jbyte* limit_card = NULL;
       const size_t last_block_size = sp->block_size(last_block);
-      jbyte* const last_card_of_last_obj =
+      volatile jbyte* const last_card_of_last_obj =
         byte_for(last_block + last_block_size - 1);
-      jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end());
+      volatile jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end());
       // This search potentially goes a long distance looking
       // for the next card that will be scanned, terminating
       // at the end of the last_block, if no earlier dirty card
       // is found.
       assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,
              "last card of next chunk may be wrong");
-      for (jbyte* cur = first_card_of_next_chunk;
+      for (volatile jbyte* cur = first_card_of_next_chunk;
            cur <= last_card_of_last_obj; cur++) {
-        const jbyte val = *cur;
+        const volatile jbyte val = *cur;
         if (card_will_be_scanned(val)) {
           limit_card = cur; break;
         } else {
@@ -341,7 +341,7 @@
       for (uintptr_t lnc_index = cur_chunk_index + 1;
            lnc_index <= last_chunk_index_to_check;
            lnc_index++) {
-        jbyte* lnc_card = lowest_non_clean[lnc_index];
+        volatile jbyte* lnc_card = lowest_non_clean[lnc_index];
         if (lnc_card != NULL) {
           // we can stop at the first non-NULL entry we find
           if (lnc_card <= limit_card) {
@@ -373,7 +373,7 @@
 void
 CardTableModRefBSForCTRS::
 get_LNC_array_for_space(Space* sp,
-                        jbyte**& lowest_non_clean,
+                        volatile jbyte**& lowest_non_clean,
                         uintptr_t& lowest_non_clean_base_chunk_index,
                         size_t& lowest_non_clean_chunk_size) {
--- old/src/share/vm/gc/cms/parNewGeneration.hpp 2016-04-06 17:17:16.714383576 +0200
+++ new/src/share/vm/gc/cms/parNewGeneration.hpp 2016-04-06 17:17:16.598383575 +0200
@@ -323,7 +323,7 @@
   // A list of from-space images of to-be-scanned objects, threaded through
   // klass-pointers (klass information already copied to the forwarded
   // image.)  Manipulated with CAS.
-  oop _overflow_list;
+  volatile oop _overflow_list;
   NOT_PRODUCT(ssize_t _num_par_pushes;)
   // This closure is used by the reference processor to filter out
--- old/src/share/vm/gc/g1/dirtyCardQueue.cpp 2016-04-06 17:17:17.290383583 +0200
+++ new/src/share/vm/gc/g1/dirtyCardQueue.cpp 2016-04-06 17:17:17.194383582 +0200
@@ -160,7 +160,7 @@
   size_t limit = DirtyCardQueue::byte_index_to_index(buffer_size());
   size_t i = DirtyCardQueue::byte_index_to_index(node->index());
   for ( ; i < limit; ++i) {
-    jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
+    volatile jbyte* card_ptr = static_cast<volatile jbyte*>(buf[i]);
     assert(card_ptr != NULL, "invariant");
     if (!cl->do_card_ptr(card_ptr, worker_i)) {
       result = false;           // Incomplete processing.
--- old/src/share/vm/gc/g1/dirtyCardQueue.hpp 2016-04-06 17:17:17.758383588 +0200
+++ new/src/share/vm/gc/g1/dirtyCardQueue.hpp 2016-04-06 17:17:17.654383587 +0200
@@ -37,7 +37,7 @@
 public:
   // Process the card whose card table entry is "card_ptr".  If returns
   // "false", terminate the iteration early.
-  virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0;
+  virtual bool do_card_ptr(volatile jbyte* card_ptr, uint worker_i) = 0;
 };
 // A ptrQueue whose elements are "oops", pointers to object heads.
--- old/src/share/vm/gc/g1/g1CardCounts.cpp 2016-04-06 17:17:18.194383594 +0200
+++ new/src/share/vm/gc/g1/g1CardCounts.cpp 2016-04-06 17:17:18.098383592 +0200
@@ -83,7 +83,7 @@
   }
 }
-uint G1CardCounts::add_card_count(jbyte* card_ptr) {
+uint G1CardCounts::add_card_count(volatile jbyte* card_ptr) {
   // Returns the number of times the card has been refined.
   // If we failed to reserve/commit the counts table, return 0.
   // If card_ptr is beyond the committed end of the counts table,
@@ -118,11 +118,11 @@
 void G1CardCounts::clear_range(MemRegion mr) {
   if (has_count_table()) {
-    const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start());
+    const volatile jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start());
     // We use the last address in the range as the range could represent the
     // last region in the heap. In which case trying to find the card will be an
     // OOB access to the card table.
-    const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last());
+    const volatile jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last());
 #ifdef ASSERT
     HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
--- old/src/share/vm/gc/g1/g1CardCounts.hpp 2016-04-06 17:17:18.722383600 +0200
+++ new/src/share/vm/gc/g1/g1CardCounts.hpp 2016-04-06 17:17:18.626383599 +0200
@@ -64,7 +64,7 @@
   size_t _reserved_max_card_num;
   // CardTable bottom.
-  const jbyte* _ct_bot;
+  const volatile jbyte* _ct_bot;
   // Barrier set
   CardTableModRefBS* _ct_bs;
@@ -77,22 +77,22 @@
     return has_reserved_count_table();
   }
-  size_t ptr_2_card_num(const jbyte* card_ptr) {
+  size_t ptr_2_card_num(const volatile jbyte* card_ptr) {
     assert(card_ptr >= _ct_bot,
            "Invalid card pointer: "
            "card_ptr: " PTR_FORMAT ", "
            "_ct_bot: " PTR_FORMAT,
-           p2i(card_ptr), p2i(_ct_bot));
-    size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
+           p2i((jbyte*)card_ptr), p2i((jbyte*)_ct_bot));
+    size_t card_num = pointer_delta((void*)card_ptr, (jbyte*)_ct_bot, sizeof(jbyte));
     assert(card_num < _reserved_max_card_num,
-           "card pointer out of range: " PTR_FORMAT, p2i(card_ptr));
+           "card pointer out of range: " PTR_FORMAT, p2i((jbyte*)card_ptr));
     return card_num;
   }
-  jbyte* card_num_2_ptr(size_t card_num) {
+  volatile jbyte* card_num_2_ptr(size_t card_num) {
     assert(card_num < _reserved_max_card_num,
           "card num out of range: " SIZE_FORMAT, card_num);
-    return (jbyte*) (_ct_bot + card_num);
+    return (volatile jbyte*) (_ct_bot + card_num);
   }
   // Clear the counts table for the given (exclusive) index range.
@@ -113,7 +113,7 @@
   // Increments the refinement count for the given card.
   // Returns the pre-increment count value.
-  uint add_card_count(jbyte* card_ptr);
+  uint add_card_count(volatile jbyte* card_ptr);
   // Returns true if the given count is high enough to be considered
   // 'hot'; false otherwise.
--- old/src/share/vm/gc/g1/g1CollectedHeap.cpp 2016-04-06 17:17:19.194383605 +0200
+++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp 2016-04-06 17:17:19.082383604 +0200
@@ -96,7 +96,7 @@
 public:
   RefineCardTableEntryClosure() : _concurrent(true) { }
-  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
+  bool do_card_ptr(volatile jbyte* card_ptr, uint worker_i) {
     bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
     // This path is executed by the concurrent refine or mutator threads,
     // concurrently, and so we do not care if card_ptr contains references
@@ -121,7 +121,7 @@
   G1CollectedHeap* _g1h;
   G1SATBCardTableLoggingModRefBS* _g1_bs;
-  HeapRegion* region_for_card(jbyte* card_ptr) const {
+  HeapRegion* region_for_card(volatile jbyte* card_ptr) const {
     return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
   }
@@ -135,7 +135,7 @@
   RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
     _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
-  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
+  bool do_card_ptr(volatile jbyte* card_ptr, uint worker_i) {
     HeapRegion* hr = region_for_card(card_ptr);
     // Should only dirty cards in regions that won't be freed.
@@ -3017,7 +3017,7 @@
     HeapRegionRemSetIterator hrrs(r->rem_set());
     size_t card_index;
     while (hrrs.has_next(card_index)) {
-      jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
+      volatile jbyte* card_ptr = (volatile jbyte*)bs->byte_for_index(card_index);
       // The remembered set might contain references to already freed
       // regions. Filter out such entries to avoid failing card table
       // verification.
--- old/src/share/vm/gc/g1/g1ConcurrentMark.cpp 2016-04-06 17:17:19.750383612 +0200
+++ new/src/share/vm/gc/g1/g1ConcurrentMark.cpp 2016-04-06 17:17:19.614383610 +0200
@@ -1923,7 +1923,7 @@
     } else {
       assert(_finger > finger, "the finger should have moved forward");
       // read it again
-      finger = _finger;
+      finger = res;
     }
   }
--- old/src/share/vm/gc/g1/g1EvacFailure.cpp 2016-04-06 17:17:20.338383619 +0200
+++ new/src/share/vm/gc/g1/g1EvacFailure.cpp 2016-04-06 17:17:20.198383617 +0200
@@ -53,7 +53,7 @@
     if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p))) {
       size_t card_index = _ct_bs->index_for(p);
       if (_ct_bs->mark_card_deferred(card_index)) {
-        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
+        _dcq->enqueue((volatile jbyte*)_ct_bs->byte_for_index(card_index));
       }
     }
   }
--- old/src/share/vm/gc/g1/g1HotCardCache.cpp 2016-04-06 17:17:21.030383627 +0200
+++ new/src/share/vm/gc/g1/g1HotCardCache.cpp 2016-04-06 17:17:20.890383625 +0200
@@ -36,7 +36,7 @@
     _use_cache = true;
     _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
-    _hot_cache = ArrayAllocator<jbyte*, mtGC>::allocate(_hot_cache_size);
+    _hot_cache = ArrayAllocator<volatile jbyte*, mtGC>::allocate(_hot_cache_size);
     reset_hot_cache_internal();
@@ -51,12 +51,12 @@
 G1HotCardCache::~G1HotCardCache() {
   if (default_use_cache()) {
     assert(_hot_cache != NULL, "Logic");
-    ArrayAllocator<jbyte*, mtGC>::free(_hot_cache, _hot_cache_size);
+    ArrayAllocator<volatile jbyte*, mtGC>::free(_hot_cache, _hot_cache_size);
     _hot_cache = NULL;
   }
 }
-jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
+volatile jbyte* G1HotCardCache::insert(volatile jbyte* card_ptr) {
   uint count = _card_counts.add_card_count(card_ptr);
   if (!_card_counts.is_hot(count)) {
     // The card is not hot so do not store it in the cache;
@@ -66,7 +66,7 @@
   // Otherwise, the card is hot.
   size_t index = Atomic::add(1, &_hot_cache_idx) - 1;
   size_t masked_index = index & (_hot_cache_size - 1);
-  jbyte* current_ptr = _hot_cache[masked_index];
+  volatile jbyte* current_ptr = _hot_cache[masked_index];
   // Try to store the new card pointer into the cache. Compare-and-swap to guard
   // against the unlikely event of a race resulting in another card pointer to
   // have already been written to the cache. In this case we will return
   // card_ptr in favor of the other option, which would be starting over. This
   // should be OK since card_ptr will likely be the older card already when/if
   // this ever happens.
-  jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
-                                                    &_hot_cache[masked_index],
-                                                    current_ptr);
+  volatile jbyte* previous_ptr = (volatile jbyte*)Atomic::cmpxchg_ptr((jbyte*)card_ptr,
+                                                                      &_hot_cache[masked_index],
+                                                                      (jbyte*)current_ptr);
   return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
 }
@@ -93,7 +93,7 @@
     // The current worker has successfully claimed the chunk [start_idx..end_idx)
     end_idx = MIN2(end_idx, _hot_cache_size);
     for (size_t i = start_idx; i < end_idx; i++) {
-      jbyte* card_ptr = _hot_cache[i];
+      volatile jbyte* card_ptr = _hot_cache[i];
       if (card_ptr != NULL) {
         bool result = cl->do_card_ptr(card_ptr, worker_i);
         assert(result, "Closure should always return true");
--- old/src/share/vm/gc/g1/g1HotCardCache.hpp 2016-04-06 17:17:21.510383633 +0200
+++ new/src/share/vm/gc/g1/g1HotCardCache.hpp 2016-04-06 17:17:21.414383632 +0200
@@ -63,7 +63,7 @@
   // The card cache table
-  jbyte** _hot_cache;
+  volatile jbyte** _hot_cache;
   size_t _hot_cache_size;
@@ -108,7 +108,7 @@
   // adding, NULL is returned and no further action in needed.
   // If we evict a card from the cache to make room for the new card,
   // the evicted card is then returned for refinement.
-  jbyte* insert(jbyte* card_ptr);
+  volatile jbyte* insert(volatile jbyte* card_ptr);
   // Refine the cards that have delayed as a result of
   // being in the cache.
--- old/src/share/vm/gc/g1/g1ParScanThreadState.hpp 2016-04-06 17:17:21.958383638 +0200
+++ new/src/share/vm/gc/g1/g1ParScanThreadState.hpp 2016-04-06 17:17:21.858383637 +0200
@@ -106,7 +106,7 @@
       size_t card_index = ctbs()->index_for(p);
       // If the card hasn't been added to the buffer, do it.
       if (ctbs()->mark_card_deferred(card_index)) {
-        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
+        dirty_card_queue().enqueue((volatile jbyte*)ctbs()->byte_for_index(card_index));
       }
     }
   }
--- old/src/share/vm/gc/g1/g1RemSet.cpp 2016-04-06 17:17:22.478383644 +0200
+++ new/src/share/vm/gc/g1/g1RemSet.cpp 2016-04-06 17:17:22.342383643 +0200
@@ -223,7 +223,7 @@
     _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
   {}
-  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
+  bool do_card_ptr(volatile jbyte* card_ptr, uint worker_i) {
     // The only time we care about recording cards that
     // contain references that point into the collection set
    // is during RSet updating within an evacuation pause.
@@ -366,11 +366,11 @@
 // into the collection set, if we're checking for such references;
 // false otherwise.
-bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
+bool G1RemSet::refine_card(volatile jbyte* card_ptr, uint worker_i,
                            bool check_for_refs_into_cset) {
   assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
          "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
-         p2i(card_ptr),
+         p2i((jbyte*)card_ptr),
          _ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
          p2i(_ct_bs->addr_for(card_ptr)),
         _g1->addr_to_region(_ct_bs->addr_for(card_ptr)));
--- old/src/share/vm/gc/g1/g1RemSet.hpp 2016-04-06 17:17:22.934383650 +0200
+++ new/src/share/vm/gc/g1/g1RemSet.hpp 2016-04-06 17:17:22.834383648 +0200
@@ -148,7 +148,7 @@
   // If check_for_refs_into_cset is true, a true result is returned
   // if the given card contains oops that have references into the
   // current collection set.
-  virtual bool refine_card(jbyte* card_ptr,
+  virtual bool refine_card(volatile jbyte* card_ptr,
                            uint worker_i,
                            bool check_for_refs_into_cset);
--- old/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp 2016-04-06 17:17:23.378383655 +0200
+++ new/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp 2016-04-06 17:17:23.278383654 +0200
@@ -102,10 +102,10 @@
 }
 void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
-  jbyte *const first = byte_for(mr.start());
-  jbyte *const last = byte_after(mr.last());
+  volatile jbyte *const first = byte_for(mr.start());
+  volatile jbyte *const last = byte_after(mr.last());
-  memset_with_concurrent_readers(first, g1_young_gen, last - first);
+  memset_with_concurrent_readers((jbyte*)first, g1_young_gen, last - first);
 }
 #ifndef PRODUCT
@@ -143,15 +143,15 @@
   _cur_covered_regions = 1;
   _covered[0] = _whole_heap;
-  _byte_map = (jbyte*) mapper->reserved().start();
+  _byte_map = (volatile jbyte*) mapper->reserved().start();
   byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
   assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
   assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
   log_trace(gc, barrier)("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
   log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
-                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
-  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
+                         p2i((jbyte*)&_byte_map[0]), p2i((jbyte*)&_byte_map[_last_valid_index]));
+  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i((jbyte*)byte_map_base));
 }
 void
@@ -180,7 +180,7 @@
 void G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
   volatile jbyte* byte = byte_for(mr.start());
-  jbyte* last_byte = byte_for(mr.last());
+  volatile jbyte* last_byte = byte_for(mr.last());
   Thread* thr = Thread::current();
   if (whole_heap) {
     while (byte <= last_byte) {
--- old/src/share/vm/gc/g1/heapRegion.cpp 2016-04-06 17:17:23.950383662 +0200
+++ new/src/share/vm/gc/g1/heapRegion.cpp 2016-04-06 17:17:23.802383660 +0200
@@ -384,7 +384,7 @@
 oops_on_card_seq_iterate_careful(MemRegion mr,
                                  FilterOutOfRegionClosure* cl,
                                  bool filter_young,
-                                 jbyte* card_ptr) {
+                                 volatile jbyte* card_ptr) {
   // Currently, we should only have to clean the card if filter_young
   // is true and vice versa.
   if (filter_young) {
@@ -1093,4 +1093,3 @@
   set_saved_mark_word(NULL);
   reset_bot();
 }
-
--- old/src/share/vm/gc/g1/heapRegion.hpp 2016-04-06 17:17:24.526383668 +0200
+++ new/src/share/vm/gc/g1/heapRegion.hpp 2016-04-06 17:17:24.390383667 +0200
@@ -694,7 +694,7 @@
   oops_on_card_seq_iterate_careful(MemRegion mr,
                                    FilterOutOfRegionClosure* cl,
                                    bool filter_young,
-                                   jbyte* card_ptr);
+                                   volatile jbyte* card_ptr);
   size_t recorded_rs_length() const        { return _recorded_rs_length; }
   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
--- old/src/share/vm/gc/g1/heapRegionManager.cpp 2016-04-06 17:17:25.130383676 +0200
+++ new/src/share/vm/gc/g1/heapRegionManager.cpp 2016-04-06 17:17:24.998383674 +0200
@@ -483,7 +483,7 @@
     _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
   assert(n_workers > 0, "Need at least one worker.");
   _claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
-  memset(_claims, Unclaimed, sizeof(*_claims) * _n_regions);
+  memset((uint*)_claims, Unclaimed, sizeof(*_claims) * _n_regions);
 }
 HeapRegionClaimer::~HeapRegionClaimer() {
--- old/src/share/vm/gc/g1/heapRegionManager.hpp 2016-04-06 17:17:25.586383681 +0200
+++ new/src/share/vm/gc/g1/heapRegionManager.hpp 2016-04-06 17:17:25.482383680 +0200
@@ -258,9 +258,9 @@
 // The HeapRegionClaimer is used during parallel iteration over heap regions,
 // allowing workers to claim heap regions, gaining exclusive rights to these regions.
 class HeapRegionClaimer : public StackObj {
-  uint _n_workers;
-  uint _n_regions;
-  uint* _claims;
+  uint _n_workers;
+  uint _n_regions;
+  volatile uint* _claims;
   static const uint Unclaimed = 0;
   static const uint Claimed = 1;
@@ -284,4 +284,3 @@
   bool claim_region(uint region_index);
 };
 #endif // SHARE_VM_GC_G1_HEAPREGIONMANAGER_HPP
-
--- old/src/share/vm/gc/g1/heapRegionRemSet.cpp 2016-04-06 17:17:26.026383686 +0200
+++ new/src/share/vm/gc/g1/heapRegionRemSet.cpp 2016-04-06 17:17:25.926383685 +0200
@@ -56,7 +56,7 @@
   PerRegionTable * _collision_list_next;
   // Global free list of PRTs
-  static PerRegionTable* _free_list;
+  static PerRegionTable* volatile _free_list;
 protected:
   // We need access in order to union things into the base table.
@@ -249,7 +249,7 @@
   static void test_fl_mem_size();
 };
-PerRegionTable* PerRegionTable::_free_list = NULL;
+PerRegionTable* volatile PerRegionTable::_free_list = NULL;
 size_t OtherRegionsTable::_max_fine_entries = 0;
 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
--- old/src/share/vm/gc/g1/sparsePRT.cpp 2016-04-06 17:17:26.546383692 +0200
+++ new/src/share/vm/gc/g1/sparsePRT.cpp 2016-04-06 17:17:26.442383691 +0200
@@ -280,7 +280,7 @@
 // ----------------------------------------------------------------------
-SparsePRT* SparsePRT::_head_expanded_list = NULL;
+SparsePRT* volatile SparsePRT::_head_expanded_list = NULL;
 void SparsePRT::add_to_expanded_list(SparsePRT* sprt) {
   // We could expand multiple times in a pause -- only put on list once.
--- old/src/share/vm/gc/g1/sparsePRT.hpp 2016-04-06 17:17:27.002383698 +0200
+++ new/src/share/vm/gc/g1/sparsePRT.hpp 2016-04-06 17:17:26.902383697 +0200
@@ -229,7 +229,7 @@
   bool should_be_on_expanded_list();
-  static SparsePRT* _head_expanded_list;
+  static SparsePRT* volatile _head_expanded_list;
 public:
   SparsePRT(HeapRegion* hr);
--- old/src/share/vm/gc/parallel/cardTableExtension.cpp 2016-04-06 17:17:27.458383703 +0200
+++ new/src/share/vm/gc/parallel/cardTableExtension.cpp 2016-04-06 17:17:27.346383702 +0200
@@ -139,19 +139,19 @@
   // It is a waste to get here if empty.
   assert(sp->bottom() < sp->top(), "Should not be called if empty");
   oop* sp_top = (oop*)space_top;
-  jbyte* start_card = byte_for(sp->bottom());
-  jbyte* end_card   = byte_for(sp_top - 1) + 1;
+  volatile jbyte* start_card = byte_for(sp->bottom());
+  volatile jbyte* end_card   = byte_for(sp_top - 1) + 1;
   oop* last_scanned = NULL; // Prevent scanning objects more than once
   // The width of the stripe ssize*stripe_total must be
   // consistent with the number of stripes so that the complete slice
   // is covered.
   size_t slice_width = ssize * stripe_total;
-  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
-    jbyte* worker_start_card = slice + stripe_number * ssize;
+  for (volatile jbyte* slice = start_card; slice < end_card; slice += slice_width) {
+    volatile jbyte* worker_start_card = slice + stripe_number * ssize;
     if (worker_start_card >= end_card)
       return; // We're done.
-    jbyte* worker_end_card = worker_start_card + ssize;
+    volatile jbyte* worker_end_card = worker_start_card + ssize;
     if (worker_end_card > end_card)
       worker_end_card = end_card;
@@ -208,13 +208,13 @@
     assert(worker_start_card <= end_card, "worker start card beyond end card");
     assert(worker_end_card <= end_card, "worker end card beyond end card");
-    jbyte* current_card = worker_start_card;
+    volatile jbyte* current_card = worker_start_card;
     while (current_card < worker_end_card) {
       // Find an unclean card.
       while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
-      jbyte* first_unclean_card = current_card;
+      volatile jbyte* first_unclean_card = current_card;
       // Find the end of a run of contiguous unclean cards
       while (current_card < worker_end_card && !card_is_clean(*current_card)) {
         current_card++;
       }
@@ -231,7 +231,7 @@
         HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
         size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
         HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
-        jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
+        volatile jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
         assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
         if (ending_card_of_last_object > current_card) {
           // This means the object spans the next complete card.
@@ -240,7 +240,7 @@
         }
       }
     }
-      jbyte* following_clean_card = current_card;
+      volatile jbyte* following_clean_card = current_card;
       if (first_unclean_card < worker_end_card) {
         oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
@@ -346,8 +346,8 @@
   CardTableExtension* card_table =
     barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
-  jbyte* bot = card_table->byte_for(mr.start());
-  jbyte* top = card_table->byte_for(mr.end());
+  volatile jbyte* bot = card_table->byte_for(mr.start());
+  volatile jbyte* top = card_table->byte_for(mr.end());
   while(bot <= top) {
     assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
     if (*bot == verify_card)
@@ -357,7 +357,7 @@
 }
 bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
-  jbyte* p = byte_for(addr);
+  volatile jbyte* p = byte_for(addr);
   jbyte val = *p;
   if (card_is_dirty(val))
@@ -376,7 +376,7 @@
 // Also includes verify_card
 bool CardTableExtension::addr_is_marked_precise(void *addr) {
-  jbyte* p = byte_for(addr);
+  volatile jbyte* p = byte_for(addr);
   jbyte val = *p;
   if (card_is_newgen(val))
@@ -472,13 +472,13 @@
   int ind = changed_region;
   log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
   log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
-                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
+                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
   log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
-                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
+                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
   log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
-                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
+                         p2i((jbyte*)byte_for(_covered[ind].start())), p2i((jbyte*)byte_for(_covered[ind].last())));
   log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
-                         p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));
+                         p2i(addr_for((volatile jbyte*) _committed[ind].start())), p2i(addr_for((volatile jbyte*) _committed[ind].last())));
   debug_only(verify_guard();)
 }
@@ -510,7 +510,7 @@
          "Starts should have proper alignment");
 #endif
-  jbyte* new_start = byte_for(new_region.start());
+  volatile jbyte* new_start = byte_for(new_region.start());
   // Round down because this is for the start address
   HeapWord* new_start_aligned =
     (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
@@ -583,7 +583,7 @@
 void CardTableExtension::resize_update_committed_table(int changed_region,
                                                        MemRegion new_region) {
-  jbyte* new_start = byte_for(new_region.start());
+  volatile jbyte* new_start = byte_for(new_region.start());
   // Set the new start of the committed region
   HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start,
@@ -600,13 +600,13 @@
   MemRegion original_covered = _covered[changed_region];
   // Initialize the card entries.  Only consider the
   // region covered by the card table (_whole_heap)
-  jbyte* entry;
+  volatile jbyte* entry;
   if (new_region.start() < _whole_heap.start()) {
     entry = byte_for(_whole_heap.start());
   } else {
     entry = byte_for(new_region.start());
   }
-  jbyte* end = byte_for(original_covered.start());
+  volatile jbyte* end = byte_for(original_covered.start());
   // If _whole_heap starts at the original covered regions start,
   // this loop will not execute.
   while (entry < end) { *entry++ = clean_card; }
--- old/src/share/vm/gc/parallel/cardTableExtension.hpp 2016-04-06 17:17:28.014383710 +0200
+++ new/src/share/vm/gc/parallel/cardTableExtension.hpp 2016-04-06 17:17:27.894383708 +0200
@@ -74,7 +74,7 @@
   bool addr_is_marked_imprecise(void *addr);
   bool addr_is_marked_precise(void *addr);
-  void set_card_newgen(void* addr)   { jbyte* p = byte_for(addr); *p = verify_card; }
+  void set_card_newgen(void* addr)   { volatile jbyte* p = byte_for(addr); *p = verify_card; }
   // Testers for entries
   static bool card_is_dirty(int value)      { return value == dirty_card; }
@@ -84,7 +84,7 @@
   // Card marking
   void inline_write_ref_field_gc(void* field, oop new_val) {
-    jbyte* byte = byte_for(field);
+    volatile jbyte* byte = byte_for(field);
     *byte = youngergen_card;
   }
@@ -103,7 +103,7 @@
 #ifdef ASSERT
-  bool is_valid_card_address(jbyte* addr) {
+  bool is_valid_card_address(volatile jbyte* addr) {
     return (addr >= _byte_map) && (addr < _byte_map + _byte_map_size);
   }
--- old/src/share/vm/gc/parallel/gcTaskThread.hpp 2016-04-06 17:17:28.578383716 +0200
+++ new/src/share/vm/gc/parallel/gcTaskThread.hpp 2016-04-06 17:17:28.434383715 +0200
@@ -41,7 +41,7 @@
   GCTaskManager* _manager;              // Manager for worker.
   const uint     _processor_id;         // Which processor the worker is on.
-  GCTaskTimeStamp* _time_stamps;
+  GCTaskTimeStamp* volatile _time_stamps;
   uint _time_stamp_index;
   GCTaskTimeStamp* time_stamp_at(uint index);
--- old/src/share/vm/gc/parallel/mutableSpace.hpp 2016-04-06 17:17:29.162383723 +0200
+++ new/src/share/vm/gc/parallel/mutableSpace.hpp 2016-04-06 17:17:29.050383722 +0200
@@ -51,7 +51,7 @@
   MemRegion _last_setup_region;
   size_t _alignment;
  protected:
-  HeapWord* _top;
+  HeapWord* volatile _top;
   MutableSpaceMangler* mangler() { return _mangler; }
@@ -69,7 +69,7 @@
   HeapWord* top() const                    { return _top;    }
   virtual void set_top(HeapWord* value)    { _top = value;   }
-  HeapWord** top_addr()                    { return &_top; }
+  HeapWord* volatile* top_addr()           { return &_top; }
   HeapWord** end_addr()                    { return &_end; }
   virtual void set_bottom(HeapWord* value) { _bottom = value; }
--- old/src/share/vm/gc/parallel/parallelScavengeHeap.hpp 2016-04-06 17:17:29.610383729 +0200
+++ new/src/share/vm/gc/parallel/parallelScavengeHeap.hpp 2016-04-06 17:17:29.506383727 +0200
@@ -175,7 +175,7 @@
   bool supports_inline_contig_alloc() const { return !UseNUMA; }
-  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
+  HeapWord* volatile* top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord* volatile*)-1; }
   HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
   void ensure_parsability(bool retire_tlabs);
--- old/src/share/vm/gc/parallel/psCompactionManager.cpp 2016-04-06 17:17:30.062383734 +0200
+++ new/src/share/vm/gc/parallel/psCompactionManager.cpp 2016-04-06 17:17:29.962383733 +0200
@@ -54,7 +54,7 @@
 uint* ParCompactionManager::_recycled_stack_index = NULL;
 int ParCompactionManager::_recycled_top = -1;
-int ParCompactionManager::_recycled_bottom = -1;
+volatile int ParCompactionManager::_recycled_bottom = -1;
 ParCompactionManager::ParCompactionManager() :
     _action(CopyAndUpdate),
--- old/src/share/vm/gc/parallel/psCompactionManager.hpp 2016-04-06 17:17:30.582383740 +0200
+++ new/src/share/vm/gc/parallel/psCompactionManager.hpp 2016-04-06 17:17:30.478383739 +0200
@@ -103,7 +103,7 @@
   static int _recycled_top;
   // The index into _recycled_stack_index of the last region stack index
   // popped.  If -1, there has not been any entry popped.
-  static int _recycled_bottom;
+  static volatile int _recycled_bottom;
   static ParMarkBitMap* _mark_bitmap;
--- old/src/share/vm/gc/parallel/psYoungGen.hpp 2016-04-06 17:17:31.030383745 +0200
+++ new/src/share/vm/gc/parallel/psYoungGen.hpp 2016-04-06 17:17:30.926383744 +0200
@@ -162,7 +162,7 @@
     return result;
   }
-  HeapWord** top_addr() const   { return eden_space()->top_addr(); }
+  HeapWord* volatile* top_addr() const   { return eden_space()->top_addr(); }
   HeapWord** end_addr() const   { return eden_space()->end_addr(); }
   // Iteration.
--- old/src/share/vm/gc/parallel/vmStructs_parallelgc.hpp 2016-04-06 17:17:31.474383751 +0200
+++ new/src/share/vm/gc/parallel/vmStructs_parallelgc.hpp 2016-04-06 17:17:31.370383749 +0200
@@ -26,7 +26,8 @@
 #define SHARE_VM_GC_PARALLEL_VMSTRUCTS_PARALLELGC_HPP
 #define VM_STRUCTS_PARALLELGC(nonstatic_field, \
-                   static_field) \
+                   volatile_nonstatic_field, \
+                   static_field) \
   \
   /**********************/ \
   /* Parallel GC fields */ \
@@ -40,7 +41,7 @@
   nonstatic_field(ImmutableSpace,              _bottom,                                       HeapWord*) \
   nonstatic_field(ImmutableSpace,              _end,                                          HeapWord*) \
   \
-  nonstatic_field(MutableSpace,                _top,                                          HeapWord*) \
+  volatile_nonstatic_field(MutableSpace,       _top,                                          HeapWord*) \
   \
   nonstatic_field(PSYoungGen,                  _reserved,                                     MemRegion) \
   nonstatic_field(PSYoungGen,                  _virtual_space,                                PSVirtualSpace*) \
--- old/src/share/vm/gc/serial/defNewGeneration.cpp 2016-04-06 17:17:32.066383758 +0200
+++ new/src/share/vm/gc/serial/defNewGeneration.cpp 2016-04-06 17:17:31.958383756 +0200
@@ -512,7 +512,7 @@
 }
-HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
+HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); }
 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
--- old/src/share/vm/gc/serial/defNewGeneration.hpp 2016-04-06 17:17:32.710383765 +0200
+++ new/src/share/vm/gc/serial/defNewGeneration.hpp 2016-04-06 17:17:32.586383764 +0200
@@ -225,7 +225,7 @@
   size_t max_survivor_size() const          { return _max_survivor_size; }
   bool supports_inline_contig_alloc() const { return true; }
-  HeapWord** top_addr() const;
+  HeapWord* volatile* top_addr() const;
   HeapWord** end_addr() const;
   // Thread-local allocation buffers
--- old/src/share/vm/gc/shared/cardTableModRefBS.cpp 2016-04-06 17:17:33.246383772 +0200
+++ new/src/share/vm/gc/shared/cardTableModRefBS.cpp 2016-04-06 17:17:33.146383770 +0200
@@ -104,12 +104,12 @@
   // then add it to byte_map_base, i.e.
   //
   //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
-  _byte_map = (jbyte*) heap_rs.base();
+  _byte_map = (volatile jbyte*) heap_rs.base();
   byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
   assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
   assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
-  jbyte* guard_card = &_byte_map[_guard_index];
+  volatile jbyte* guard_card = &_byte_map[_guard_index];
   uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
   _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
   os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
@@ -118,8 +118,8 @@
   log_trace(gc, barrier)("CardTableModRefBS::CardTableModRefBS: ");
   log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
-                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
-  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
+                         p2i((jbyte*)&_byte_map[0]), p2i((jbyte*)&_byte_map[_last_valid_index]));
+  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i((jbyte*)byte_map_base));
 }
 CardTableModRefBS::~CardTableModRefBS() {
@@ -151,7 +151,7 @@
     _cur_covered_regions++;
     _covered[res].set_start(base);
     _covered[res].set_word_size(0);
-    jbyte* ct_start = byte_for(base);
+    volatile jbyte* ct_start = byte_for(base);
     uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
     _committed[res].set_start((HeapWord*)ct_start_aligned);
     _committed[res].set_word_size(0);
@@ -211,7 +211,7 @@
     cur_committed.set_end(max_prev_end);
   }
   // Align the end up to a page size (starts are already aligned).
-  jbyte* const new_end = byte_after(new_region.last());
+  volatile jbyte* const new_end = byte_after(new_region.last());
   HeapWord* new_end_aligned =
     (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
   assert(new_end_aligned >= (HeapWord*) new_end,
@@ -302,7 +302,7 @@
     // to the tables.
     bool covered = false;
     for (int cr = 0; cr < _cur_covered_regions; cr++) {
-      if (_committed[cr].contains(new_end - 1)) {
+      if (_committed[cr].contains((jbyte*)new_end - 1)) {
         covered = true;
         break;
       }
@@ -311,7 +311,7 @@
 #endif
   // The default of 0 is not necessarily clean cards.
-  jbyte* entry;
+  volatile jbyte* entry;
   if (old_region.last() < _whole_heap.start()) {
     entry = byte_for(_whole_heap.start());
   } else {
@@ -321,8 +321,8 @@
          "The guard card will be overwritten");
   // This line commented out cleans the newly expanded region and
   // not the aligned up expanded region.
-  // jbyte* const end = byte_after(new_region.last());
-  jbyte* const end = (jbyte*) new_end_for_commit;
+  // volatile jbyte* const end = byte_after(new_region.last());
+  volatile jbyte* const end = (volatile jbyte*) new_end_for_commit;
   assert((end >= byte_after(new_region.last())) || collided || guarded,
          "Expect to be beyond new region unless impacting another region");
   // do nothing if we resized downward.
@@ -332,14 +332,14 @@
       // The end of the new committed region should not
       // be in any existing region unless it matches
      // the start of the next region.
-      assert(!_committed[ri].contains(end) ||
+      assert(!_committed[ri].contains((jbyte*)end) ||
              (_committed[ri].start() == (HeapWord*) end),
              "Overlapping committed regions");
     }
   }
 #endif
   if (entry < end) {
-    memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
+    memset((void*)entry, clean_card, pointer_delta((void*)end, (void*)entry, sizeof(jbyte)));
   }
 }
 // In any case, the covered size changes.
@@ -351,13 +351,13 @@
   log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
                          ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
   log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
-                         p2i(byte_for(_covered[ind].start())),  p2i(byte_for(_covered[ind].last())));
+                         p2i((jbyte*)byte_for(_covered[ind].start())),  p2i((jbyte*)byte_for(_covered[ind].last())));
   log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
-                         p2i(addr_for((jbyte*) _committed[ind].start())),  p2i(addr_for((jbyte*) _committed[ind].last())));
+                         p2i(addr_for((volatile jbyte*) _committed[ind].start())),  p2i(addr_for((volatile jbyte*) _committed[ind].last())));
   // Touch the last card of the covered region to show that it
   // is committed (or SEGV).
-  debug_only((void) (*byte_for(_covered[ind].last()));)
+  debug_only(jbyte last_covered_card = *byte_for(_covered[ind].last());)
   debug_only(verify_guard();)
 }
@@ -372,8 +372,8 @@
 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
   assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
   assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
-  jbyte* cur  = byte_for(mr.start());
-  jbyte* last = byte_after(mr.last());
+  volatile jbyte* cur  = byte_for(mr.start());
+  volatile jbyte* last = byte_after(mr.last());
   while (cur < last) {
     *cur = dirty_card;
     cur++;
@@ -392,15 +392,15 @@
 void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
   // Be conservative: only clean cards entirely contained within the
   // region.
-  jbyte* cur;
+  volatile jbyte* cur;
   if (mr.start() == _whole_heap.start()) {
     cur = byte_for(mr.start());
   } else {
     assert(mr.start() > _whole_heap.start(), "mr is not covered.");
     cur = byte_after(mr.start() - 1);
   }
-  jbyte* last = byte_after(mr.last());
-  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
+  volatile jbyte* last = byte_after(mr.last());
+  memset((void*)cur, clean_card, pointer_delta((void*)last, (void*)cur, sizeof(jbyte)));
 }
 void CardTableModRefBS::clear(MemRegion mr) {
@@ -411,9 +411,9 @@
 }
 void CardTableModRefBS::dirty(MemRegion mr) {
-  jbyte* first = byte_for(mr.start());
-  jbyte* last  = byte_after(mr.last());
-  memset(first, dirty_card, last-first);
+  volatile jbyte* first = byte_for(mr.start());
+  volatile jbyte* last  = byte_after(mr.last());
+  memset((void*)first, dirty_card, pointer_delta((void*)last, (void*)first, sizeof(jbyte)));
 }
 // Unlike several other card table methods, dirty_card_iterate()
@@ -423,7 +423,7 @@
   for (int i = 0; i < _cur_covered_regions; i++) {
     MemRegion mri = mr.intersection(_covered[i]);
     if (!mri.is_empty()) {
-      jbyte *cur_entry, *next_entry, *limit;
+      volatile jbyte *cur_entry, *next_entry, *limit;
       for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
            cur_entry <= limit;
            cur_entry  = next_entry) {
@@ -449,7 +449,7 @@
   for (int i = 0; i < _cur_covered_regions; i++) {
     MemRegion mri = mr.intersection(_covered[i]);
     if (!mri.is_empty()) {
-      jbyte* cur_entry, *next_entry, *limit;
+      volatile jbyte* cur_entry, *next_entry, *limit;
       for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
            cur_entry <= limit;
            cur_entry  = next_entry) {
@@ -492,20 +492,20 @@
 #ifndef PRODUCT
 void CardTableModRefBS::verify_region(MemRegion mr,
                                       jbyte val, bool val_equals) {
-  jbyte* start    = byte_for(mr.start());
-  jbyte* end      = byte_for(mr.last());
+  volatile jbyte* start    = byte_for(mr.start());
+  volatile jbyte* end      = byte_for(mr.last());
   bool failures = false;
-  for (jbyte* curr = start; curr <= end; ++curr) {
+  for (volatile jbyte* curr = start; curr <= end; ++curr) {
     jbyte curr_val = *curr;
     bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
     if (failed) {
       if (!failures) {
-        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
+        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i((jbyte*)start), p2i((jbyte*)end));
         log_error(gc, verify)("==   %sexpecting value: %d", (val_equals) ? "" : "not ", val);
         failures = true;
       }
       log_error(gc, verify)("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
-                            p2i(curr), p2i(addr_for(curr)),
+                            p2i((jbyte*)curr), p2i((jbyte*)addr_for(curr)),
                             p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                             (int) curr_val);
     }
@@ -524,6 +524,5 @@
 void CardTableModRefBS::print_on(outputStream* st) const {
   st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
-               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
+               p2i((jbyte*)_byte_map), p2i(((jbyte*)_byte_map) + _byte_map_size), p2i((jbyte*)byte_map_base));
 }
-
--- old/src/share/vm/gc/shared/cardTableModRefBS.hpp 2016-04-06 17:17:33.710383777 +0200
+++ new/src/share/vm/gc/shared/cardTableModRefBS.hpp 2016-04-06 17:17:33.610383776 +0200
@@ -70,7 +70,7 @@
   size_t          _last_valid_index;  // index of the last valid element
   const size_t    _page_size;         // page size used when mapping _byte_map
   size_t          _byte_map_size;     // in bytes
-  jbyte*          _byte_map;          // the card marking array
+  volatile jbyte* _byte_map;          // the card marking array
   int _cur_covered_regions;
   // The covered regions should be in address order.
@@ -116,12 +116,12 @@
   MemRegion committed_unique_to_self(int self, MemRegion mr) const;
   // Mapping from address to card marking array entry
-  jbyte* byte_for(const void* p) const {
+  volatile jbyte* byte_for(const void* p) const {
     assert(_whole_heap.contains(p),
            "Attempt to access p = " PTR_FORMAT " out of bounds of "
            " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
           p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
-    jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
+    volatile jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
     assert(result >= _byte_map && result < _byte_map + _byte_map_size,
            "out of bounds accessor for card marking array");
     return result;
@@ -130,7 +130,7 @@
   // The card table byte one after the card marking array
   // entry for argument address. Typically used for higher bounds
   // for loops iterating through the card table.
-  jbyte* byte_after(const void* p) const {
+  volatile jbyte* byte_after(const void* p) const {
     return byte_for(p) + 1;
   }
@@ -235,16 +235,16 @@
   // This would be the 0th element of _byte_map, if the heap started at 0x0.  But
   // since the heap starts at some higher address, this points to somewhere
   // before the beginning of the actual _byte_map.
-  jbyte* byte_map_base;
+  volatile jbyte* byte_map_base;
   // Return true if "p" is at the start of a card.
   bool is_card_aligned(HeapWord* p) {
-    jbyte* pcard = byte_for(p);
+    volatile jbyte* pcard = byte_for(p);
     return (addr_for(pcard) == p);
   }
   HeapWord* align_to_card_boundary(HeapWord* p) {
-    jbyte* pcard = byte_for(p + card_size_in_words - 1);
+    volatile jbyte* pcard = byte_for(p + card_size_in_words - 1);
     return addr_for(pcard);
   }
@@ -280,18 +280,18 @@
                       int reset_val);
   // Provide read-only access to the card table array.
-  const jbyte* byte_for_const(const void* p) const {
+  const volatile jbyte* byte_for_const(const void* p) const {
     return byte_for(p);
   }
-  const jbyte* byte_after_const(const void* p) const {
+  const volatile jbyte* byte_after_const(const void* p) const {
     return byte_after(p);
   }
 
   // Mapping from card marking array entry to address of first word
-  HeapWord* addr_for(const jbyte* p) const {
+  HeapWord* addr_for(const volatile jbyte* p) const {
     assert(p >= _byte_map && p < _byte_map + _byte_map_size,
            "out of bounds access to card marking array");
-    size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
+    size_t delta = pointer_delta((const void*)p, (const void*)byte_map_base, sizeof(jbyte));
     HeapWord* result = (HeapWord*) (delta << card_shift);
     assert(_whole_heap.contains(result),
            "Returning result = " PTR_FORMAT " out of bounds of "
@@ -309,7 +309,7 @@
     return byte_for(p) - _byte_map;
   }
 
-  const jbyte* byte_for_index(const size_t card_index) const {
+  const volatile jbyte* byte_for_index(const size_t card_index) const {
     return _byte_map + card_index;
   }
 
--- old/src/share/vm/gc/shared/cardTableModRefBS.inline.hpp 2016-04-06 17:17:34.158383782 +0200
+++ new/src/share/vm/gc/shared/cardTableModRefBS.inline.hpp 2016-04-06 17:17:34.062383781 +0200
@@ -30,7 +30,7 @@
 #include "runtime/orderAccess.inline.hpp"
 
 template <class T> inline void CardTableModRefBS::inline_write_ref_field(T* field, oop newVal, bool release) {
-  jbyte* byte = byte_for((void*)field);
+  volatile jbyte* byte = byte_for((void*)field);
   if (release) {
     // Perform a releasing store if requested.
     OrderAccess::release_store((volatile jbyte*) byte, dirty_card);
--- old/src/share/vm/gc/shared/cardTableModRefBSForCTRS.hpp 2016-04-06 17:17:34.694383789 +0200
+++ new/src/share/vm/gc/shared/cardTableModRefBSForCTRS.hpp 2016-04-06 17:17:34.590383787 +0200
@@ -80,7 +80,7 @@
   // covered region. Each entry of these arrays is the lowest non-clean
   // card of the corresponding chunk containing part of an object from the
   // previous chunk, or else NULL.
-  typedef jbyte* CardPtr;
+  typedef volatile jbyte* CardPtr;
   typedef CardPtr* CardArr;
   CardArr* _lowest_non_clean;
   size_t* _lowest_non_clean_chunk_size;
@@ -93,7 +93,7 @@
   // Ensures that these arrays are of sufficient size, allocating if necessary.
   // May be called by several threads concurrently.
   void get_LNC_array_for_space(Space* sp,
-                               jbyte**& lowest_non_clean,
+                               volatile jbyte**& lowest_non_clean,
                                uintptr_t& lowest_non_clean_base_chunk_index,
                                size_t& lowest_non_clean_chunk_size);
 
@@ -117,7 +117,7 @@
                                       jint stride, int n_strides,
                                       OopsInGenClosure* cl,
                                       CardTableRS* ct,
-                                      jbyte** lowest_non_clean,
+                                      volatile jbyte** lowest_non_clean,
                                       uintptr_t lowest_non_clean_base_chunk_index,
                                       size_t lowest_non_clean_chunk_size);
 
@@ -128,7 +128,7 @@
                              DirtyCardToOopClosure* dcto_cl,
                              MemRegion chunk_mr,
                              MemRegion used,
-                             jbyte** lowest_non_clean,
+                             volatile jbyte** lowest_non_clean,
                              uintptr_t lowest_non_clean_base_chunk_index,
                              size_t lowest_non_clean_chunk_size);
 
@@ -140,4 +140,3 @@
 };
 
 #endif // include guard
-
--- old/src/share/vm/gc/shared/cardTableRS.cpp 2016-04-06 17:17:35.142383794 +0200
+++ new/src/share/vm/gc/shared/cardTableRS.cpp 2016-04-06 17:17:35.046383793 +0200
@@ -152,7 +152,7 @@
   g->younger_refs_iterate(blk, n_threads);
 }
 
-inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
+inline bool ClearNoncleanCardWrapper::clear_card(volatile jbyte* entry) {
   if (_is_par) {
     return clear_card_parallel(entry);
   } else {
@@ -160,7 +160,7 @@
   }
 }
 
-inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
+inline bool ClearNoncleanCardWrapper::clear_card_parallel(volatile jbyte* entry) {
   while (true) {
     // In the parallel case, we may have to do this several times.
     jbyte entry_val = *entry;
@@ -197,7 +197,7 @@
 }
 
 
-inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
+inline bool ClearNoncleanCardWrapper::clear_card_serial(volatile jbyte* entry) {
   jbyte entry_val = *entry;
   assert(entry_val != CardTableRS::clean_card_val(),
          "We shouldn't be looking at clean cards, and this should "
@@ -213,7 +213,7 @@
   _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) {
 }
 
-bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
+bool ClearNoncleanCardWrapper::is_word_aligned(volatile jbyte* entry) {
   return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
 }
 
@@ -225,8 +225,8 @@
   assert(mr.word_size() > 0, "Error");
   assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
   // mr.end() may not necessarily be card aligned.
-  jbyte* cur_entry = _ct->byte_for(mr.last());
-  const jbyte* limit = _ct->byte_for(mr.start());
+  volatile jbyte* cur_entry = _ct->byte_for(mr.last());
+  const volatile jbyte* limit = _ct->byte_for(mr.start());
   HeapWord* end_of_non_clean = mr.end();
   HeapWord* start_of_non_clean = end_of_non_clean;
   while (cur_entry >= limit) {
@@ -245,7 +245,7 @@
       // fast forward through potential continuous whole-word range of clean cards beginning at a word-boundary
       if (is_word_aligned(cur_entry)) {
-        jbyte* cur_row = cur_entry - BytesPerWord;
+        volatile jbyte* cur_row = cur_entry - BytesPerWord;
         while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row()) {
           cur_row -= BytesPerWord;
         }
@@ -282,7 +282,7 @@
 // cur-younger-gen                ==> cur_younger_gen
 // cur_youngergen_and_prev_nonclean_card ==> no change.
 void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
-  jbyte* entry = _ct_bs->byte_for(field);
+  volatile jbyte* entry = _ct_bs->byte_for(field);
   do {
     jbyte entry_val = *entry;
     // We put this first because it's probably the most common case.
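
Editorial note, not part of the patch: the ClearNoncleanCardWrapper and CardTableRS::write_ref_field_gc_par changes above are where the volatile qualifier actually matters. During parallel young-generation scanning, several GC threads read and compare-and-swap the same card bytes, so each pass of those retry loops must re-load the byte instead of reusing a value the compiler cached in a register. A minimal sketch of that claim pattern follows, assuming HotSpot's jbyte overload of Atomic::cmpxchg; the helper name claim_card and the simplified logic are illustrative only and do not appear in the patch.

#include "gc/shared/cardTableRS.hpp"
#include "runtime/atomic.inline.hpp"

// Illustrative helper (hypothetical): claim a non-clean card by atomically
// writing the clean value over whatever is currently stored. Because 'entry'
// is a volatile jbyte*, the load of *entry is repeated on every iteration
// rather than being hoisted out of the loop.
static bool claim_card(volatile jbyte* entry) {
  while (true) {
    jbyte seen = *entry;                              // fresh load each time
    if (seen == CardTableRS::clean_card_val()) {
      return false;                                   // another thread already cleaned it
    }
    // Publish clean_card; if some other thread changed the byte first,
    // cmpxchg returns the competing value and we retry with a new read.
    if (Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, seen) == seen) {
      return true;                                    // this thread owns the card
    }
  }
}

The real clear_card_parallel() layers the younger-gen card states on top of this skeleton; the point of the sketch is only that carrying volatile through byte_for()/byte_after() and the stored pointers lets the repeated *entry loads and the cmpxchg argument types line up without casts.
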
@@ -437,11 +437,11 @@
   if (s->end() <= gen_boundary) return;
   MemRegion used = s->used_region();
 
-  jbyte* cur_entry = byte_for(used.start());
-  jbyte* limit = byte_after(used.last());
+  volatile jbyte* cur_entry = byte_for(used.start());
+  volatile jbyte* limit = byte_after(used.last());
   while (cur_entry < limit) {
     if (*cur_entry == clean_card_val()) {
-      jbyte* first_dirty = cur_entry+1;
+      volatile jbyte* first_dirty = cur_entry+1;
       while (first_dirty < limit &&
              *first_dirty == clean_card_val()) {
         first_dirty++;
--- old/src/share/vm/gc/shared/cardTableRS.hpp 2016-04-06 17:17:35.606383799 +0200
+++ new/src/share/vm/gc/shared/cardTableRS.hpp 2016-04-06 17:17:35.498383798 +0200
@@ -89,7 +89,7 @@
   // used as the current value for a younger_refs_do iteration of that
   // portion of the table. The perm gen is index 0. The young gen is index 1,
   // but will always have the value "clean_card". The old gen is index 2.
-  jbyte* _last_cur_val_in_gen;
+  volatile jbyte* _last_cur_val_in_gen;
 
   jbyte _cur_youngergen_card_val;
 
@@ -136,7 +136,7 @@
   void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads);
 
   void inline_write_ref_field_gc(void* field, oop new_val) {
-    jbyte* byte = _ct_bs->byte_for(field);
+    volatile jbyte* byte = _ct_bs->byte_for(field);
     *byte = youngergen_card;
   }
   void write_ref_field_gc_work(void* field, oop new_val) {
@@ -168,9 +168,9 @@
     return CardTableModRefBSForCTRS::ct_max_alignment_constraint();
   }
 
-  jbyte* byte_for(void* p) { return _ct_bs->byte_for(p); }
-  jbyte* byte_after(void* p) { return _ct_bs->byte_after(p); }
-  HeapWord* addr_for(jbyte* p) { return _ct_bs->addr_for(p); }
+  volatile jbyte* byte_for(void* p) { return _ct_bs->byte_for(p); }
+  volatile jbyte* byte_after(void* p) { return _ct_bs->byte_after(p); }
+  HeapWord* addr_for(volatile jbyte* p) { return _ct_bs->addr_for(p); }
 
   bool is_prev_nonclean_card_val(jbyte v) {
     return
@@ -192,12 +192,12 @@
 private:
   // Clears the given card, return true if the corresponding card should be
   // processed.
-  inline bool clear_card(jbyte* entry);
+  inline bool clear_card(volatile jbyte* entry);
   // Work methods called by the clear_card()
-  inline bool clear_card_serial(jbyte* entry);
-  inline bool clear_card_parallel(jbyte* entry);
+  inline bool clear_card_serial(volatile jbyte* entry);
+  inline bool clear_card_parallel(volatile jbyte* entry);
   // check alignment of pointer
-  bool is_word_aligned(jbyte* entry);
+  bool is_word_aligned(volatile jbyte* entry);
 public:
   ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure,
                            CardTableRS* ct, bool is_par);
--- old/src/share/vm/gc/shared/collectedHeap.hpp 2016-04-06 17:17:36.186383806 +0200
+++ new/src/share/vm/gc/shared/collectedHeap.hpp 2016-04-06 17:17:36.078383805 +0200
@@ -350,7 +350,7 @@
   // These functions return the addresses of the fields that define the
   // boundaries of the contiguous allocation area. (These fields should be
   // physically near to one another.)
-  virtual HeapWord** top_addr() const {
+  virtual HeapWord* volatile* top_addr() const {
     guarantee(false, "inline contiguous allocation not supported");
     return NULL;
   }
--- old/src/share/vm/gc/shared/genCollectedHeap.cpp 2016-04-06 17:17:36.834383814 +0200
+++ new/src/share/vm/gc/shared/genCollectedHeap.cpp 2016-04-06 17:17:36.702383812 +0200
@@ -717,7 +717,7 @@
   return _young_gen->supports_inline_contig_alloc();
 }
 
-HeapWord** GenCollectedHeap::top_addr() const {
+HeapWord* volatile* GenCollectedHeap::top_addr() const {
   return _young_gen->top_addr();
 }
 
--- old/src/share/vm/gc/shared/genCollectedHeap.hpp 2016-04-06 17:17:37.326383820 +0200
+++ new/src/share/vm/gc/shared/genCollectedHeap.hpp 2016-04-06 17:17:37.230383819 +0200
@@ -184,7 +184,7 @@
   // We may support a shared contiguous allocation area, if the youngest
   // generation does.
   bool supports_inline_contig_alloc() const;
-  HeapWord** top_addr() const;
+  HeapWord* volatile* top_addr() const;
   HeapWord** end_addr() const;
 
   // Perform a full collection of the heap; intended for use in implementing
--- old/src/share/vm/gc/shared/generation.hpp 2016-04-06 17:17:37.786383825 +0200
+++ new/src/share/vm/gc/shared/generation.hpp 2016-04-06 17:17:37.682383824 +0200
@@ -263,7 +263,7 @@
   // These functions return the addresses of the fields that define the
   // boundaries of the contiguous allocation area. (These fields should be
   // physically near to one another.)
-  virtual HeapWord** top_addr() const { return NULL; }
+  virtual HeapWord* volatile* top_addr() const { return NULL; }
   virtual HeapWord** end_addr() const { return NULL; }
 
   // Thread-local allocation buffers
--- old/src/share/vm/gc/shared/taskqueue.hpp 2016-04-06 17:17:38.234383831 +0200
+++ new/src/share/vm/gc/shared/taskqueue.hpp 2016-04-06 17:17:38.134383829 +0200
@@ -126,7 +126,7 @@
   idx_t tag() const volatile { return _fields._tag; }
 
   // Increment top; if it wraps, increment tag also.
-  void increment() {
+  void increment() volatile {
     _fields._top = increment_index(_fields._top);
     if (_fields._top == 0) ++_fields._tag;
   }
--- old/src/share/vm/gc/shared/workgroup.cpp 2016-04-06 17:17:38.754383837 +0200
+++ new/src/share/vm/gc/shared/workgroup.cpp 2016-04-06 17:17:38.654383835 +0200
@@ -450,23 +450,21 @@
 }
 
 bool SequentialSubTasksDone::is_task_claimed(uint& t) {
-  uint* n_claimed_ptr = &_n_claimed;
-  t = *n_claimed_ptr;
+  t = _n_claimed;
   while (t < _n_tasks) {
-    jint res = Atomic::cmpxchg(t+1, n_claimed_ptr, t);
+    jint res = Atomic::cmpxchg(t+1, &_n_claimed, t);
     if (res == (jint)t) {
       return false;
     }
-    t = *n_claimed_ptr;
+    t = res;
   }
   return true;
 }
 
 bool SequentialSubTasksDone::all_tasks_completed() {
-  uint* n_completed_ptr = &_n_completed;
-  uint complete = *n_completed_ptr;
+  uint complete = _n_completed;
   while (true) {
-    uint res = Atomic::cmpxchg(complete+1, n_completed_ptr, complete);
+    uint res = Atomic::cmpxchg(complete+1, &_n_completed, complete);
     if (res == complete) {
       break;
     }
--- old/src/share/vm/gc/shared/workgroup.hpp 2016-04-06 17:17:39.206383842 +0200
+++ new/src/share/vm/gc/shared/workgroup.hpp 2016-04-06 17:17:39.106383841 +0200
@@ -285,9 +285,9 @@
 // enumeration type.
 class SubTasksDone: public CHeapObj<mtInternal> {
-  uint* _tasks;
+  volatile uint* _tasks;
   uint _n_tasks;
-  uint _threads_completed;
+  volatile uint _threads_completed;
 #ifdef ASSERT
   volatile uint _claimed;
 #endif
--- old/src/share/vm/jvmci/jvmciCompilerToVM.cpp 2016-04-06 17:17:39.698383848 +0200
+++ new/src/share/vm/jvmci/jvmciCompilerToVM.cpp 2016-04-06 17:17:39.570383846 +0200
@@ -147,9 +147,9 @@
 bool CompilerToVM::Data::_supports_inline_contig_alloc;
 HeapWord** CompilerToVM::Data::_heap_end_addr;
-HeapWord** CompilerToVM::Data::_heap_top_addr;
+HeapWord* volatile* CompilerToVM::Data::_heap_top_addr;
 
-jbyte* CompilerToVM::Data::cardtable_start_address;
+volatile jbyte* CompilerToVM::Data::cardtable_start_address;
 int CompilerToVM::Data::cardtable_shift;
 
 int CompilerToVM::Data::vm_page_size;
@@ -179,7 +179,7 @@
 
   _supports_inline_contig_alloc = Universe::heap()->supports_inline_contig_alloc();
   _heap_end_addr = _supports_inline_contig_alloc ? Universe::heap()->end_addr() : (HeapWord**) -1;
-  _heap_top_addr = _supports_inline_contig_alloc ? Universe::heap()->top_addr() : (HeapWord**) -1;
+  _heap_top_addr = _supports_inline_contig_alloc ? Universe::heap()->top_addr() : (HeapWord* volatile*) -1;
 
   BarrierSet* bs = Universe::heap()->barrier_set();
   switch (bs->kind()) {
@@ -188,7 +188,7 @@
   case BarrierSet::CardTableExtension:
   case BarrierSet::G1SATBCT:
   case BarrierSet::G1SATBCTLogging: {
-    jbyte* base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base;
+    volatile jbyte* base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base;
     assert(base != 0, "unexpected byte_map_base");
     cardtable_start_address = base;
     cardtable_shift = CardTableModRefBS::card_shift;
@@ -1340,7 +1340,7 @@
     THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
   }
   while (length > 0) {
-    jbyte* start = array->byte_at_addr(offset);
+    volatile jbyte* start = array->byte_at_addr(offset);
     tty->write((char*) start, MIN2(length, O_BUFLEN));
     length -= O_BUFLEN;
     offset += O_BUFLEN;
@@ -1492,4 +1492,3 @@
 int CompilerToVM::methods_count() {
   return sizeof(methods) / sizeof(JNINativeMethod);
 }
-
--- old/src/share/vm/jvmci/jvmciCompilerToVM.hpp 2016-04-06 17:17:40.290383855 +0200
+++ new/src/share/vm/jvmci/jvmciCompilerToVM.hpp 2016-04-06 17:17:40.166383853 +0200
@@ -58,9 +58,9 @@
   static bool _supports_inline_contig_alloc;
   static HeapWord** _heap_end_addr;
-  static HeapWord** _heap_top_addr;
+  static HeapWord* volatile* _heap_top_addr;
 
-  static jbyte* cardtable_start_address;
+  static volatile jbyte* cardtable_start_address;
   static int cardtable_shift;
 
   static int vm_page_size;
--- old/src/share/vm/oops/oopsHierarchy.hpp 2016-04-06 17:17:40.938383862 +0200
+++ new/src/share/vm/oops/oopsHierarchy.hpp 2016-04-06 17:17:40.814383861 +0200
@@ -104,6 +104,12 @@
   bool operator!=(const volatile oop o) const { return obj() != o.obj(); }
   bool operator!=(void *p) const { return obj() != p; }
 
+  oopDesc* operator->() const volatile { return obj(); }
+  bool operator==(const oop o) const volatile { return obj() == o.obj(); }
+  bool operator==(void *p) const volatile { return obj() == p; }
+  bool operator!=(const volatile oop o) const volatile { return obj() != o.obj(); }
+  bool operator!=(void *p) const volatile { return obj() != p; }
+
   bool operator<(oop o) const { return obj() < o.obj(); }
   bool operator>(oop o) const { return obj() > o.obj(); }
   bool operator<=(oop o) const { return obj() <= o.obj(); }
@@ -112,6 +118,8 @@
   // Assignment
   oop& operator=(const oop& o) { _o = o.obj(); return *this; }
+  oop& operator=(void *o) { _o = (oopDesc*)o; return *this; }
+  oop& operator=(const volatile oop& o) { _o = o.obj(); return *this; }
   volatile oop& operator=(const oop& o) volatile { _o = o.obj(); return *this; }
   volatile oop& operator=(const volatile oop& o) volatile { _o = o.obj(); return *this; }
--- old/src/share/vm/runtime/vmStructs.cpp 2016-04-06 17:17:41.438383868 +0200
+++ new/src/share/vm/runtime/vmStructs.cpp 2016-04-06 17:17:41.338383867 +0200
@@ -535,12 +535,12 @@
   nonstatic_field(CardTableModRefBS,    _last_valid_index,     const size_t)              \
   nonstatic_field(CardTableModRefBS,    _page_size,            const size_t)              \
   nonstatic_field(CardTableModRefBS,    _byte_map_size,        const size_t)              \
-  nonstatic_field(CardTableModRefBS,    _byte_map,             jbyte*)                    \
+  nonstatic_field(CardTableModRefBS,    _byte_map,             volatile jbyte*)           \
   nonstatic_field(CardTableModRefBS,    _cur_covered_regions,  int)                       \
   nonstatic_field(CardTableModRefBS,    _covered,              MemRegion*)                \
   nonstatic_field(CardTableModRefBS,    _committed,            MemRegion*)                \
   nonstatic_field(CardTableModRefBS,    _guard_region,         MemRegion)                 \
-  nonstatic_field(CardTableModRefBS,    byte_map_base,         jbyte*)                    \
+  nonstatic_field(CardTableModRefBS,    byte_map_base,         volatile jbyte*)           \
                                                                                            \
   nonstatic_field(CardTableRS,          _ct_bs,                CardTableModRefBSForCTRS*) \
                                                                                            \
@@ -2246,6 +2246,8 @@
   declare_integer_type(JavaThread::AsyncRequests)                         \
   declare_toplevel_type(jbyte*)                                           \
   declare_toplevel_type(jbyte**)                                          \
+  declare_toplevel_type(volatile jbyte*)                                  \
+  declare_toplevel_type(volatile jbyte**)                                 \
   declare_toplevel_type(jint*)                                            \
   declare_toplevel_type(jniIdMapBase*)                                    \
   declare_unsigned_integer_type(juint)                                    \
@@ -2999,6 +3001,7 @@
 #if INCLUDE_ALL_GCS
   VM_STRUCTS_PARALLELGC(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
+                        GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
                         GENERATE_STATIC_VM_STRUCT_ENTRY)
 
   VM_STRUCTS_CMS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
@@ -3011,7 +3014,7 @@
 
 #if INCLUDE_TRACE
   VM_STRUCTS_TRACE(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
-                  GENERATE_STATIC_VM_STRUCT_ENTRY)
+                   GENERATE_STATIC_VM_STRUCT_ENTRY)
 #endif
 
   VM_STRUCTS_EXT(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
@@ -3197,11 +3200,12 @@
 #if INCLUDE_ALL_GCS
   VM_STRUCTS_PARALLELGC(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
-                        CHECK_STATIC_VM_STRUCT_ENTRY);
+                        CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
+                        CHECK_STATIC_VM_STRUCT_ENTRY);
 
   VM_STRUCTS_CMS(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
-      CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
-      CHECK_STATIC_VM_STRUCT_ENTRY);
+                 CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
+                 CHECK_STATIC_VM_STRUCT_ENTRY);
 
   VM_STRUCTS_G1(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
                 CHECK_STATIC_VM_STRUCT_ENTRY);
@@ -3210,7 +3214,7 @@
 
 #if INCLUDE_TRACE
   VM_STRUCTS_TRACE(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
-                  CHECK_STATIC_VM_STRUCT_ENTRY);
+                   CHECK_STATIC_VM_STRUCT_ENTRY);
 #endif
 
   VM_STRUCTS_EXT(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
@@ -3322,6 +3326,7 @@
                    CHECK_NO_OP));
 #if INCLUDE_ALL_GCS
   debug_only(VM_STRUCTS_PARALLELGC(ENSURE_FIELD_TYPE_PRESENT,
+                                   ENSURE_FIELD_TYPE_PRESENT,
                                    ENSURE_FIELD_TYPE_PRESENT));
   debug_only(VM_STRUCTS_CMS(ENSURE_FIELD_TYPE_PRESENT,
                             ENSURE_FIELD_TYPE_PRESENT,