--- old/src/share/vm/opto/parse3.cpp	2016-07-11 22:46:50.524363343 +0900
+++ new/src/share/vm/opto/parse3.cpp	2016-07-11 22:46:50.386363827 +0900
@@ -192,7 +192,7 @@
     type = Type::get_const_basic_type(bt);
   }
   if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
-    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
+    insert_mem_bar(Opcodes::Op_MemBarVolatile);   // StoreLoad barrier
   }
   // Build the load.
   //
@@ -235,7 +235,7 @@
   // another volatile read.
   if (field->is_volatile()) {
     // Memory barrier includes bogus read of value to force load BEFORE membar
-    insert_mem_bar(Op_MemBarAcquire, ld);
+    insert_mem_bar(Opcodes::Op_MemBarAcquire, ld);
   }
 }
 
@@ -244,7 +244,7 @@
   // If reference is volatile, prevent following memory ops from
   // floating down past the volatile write.  Also prevents commoning
   // another volatile read.
-  if (is_vol) insert_mem_bar(Op_MemBarRelease);
+  if (is_vol) insert_mem_bar(Opcodes::Op_MemBarRelease);
 
   // Compute address and memory type.
   int offset = field->offset_in_bytes();
@@ -286,7 +286,7 @@
   if (is_vol) {
     // If not multiple copy atomic, we do the MemBarVolatile before the load.
     if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
-      insert_mem_bar(Op_MemBarVolatile); // Use fat membar
+      insert_mem_bar(Opcodes::Op_MemBarVolatile); // Use fat membar
     }
     // Remember we wrote a volatile field.
     // For not multiple copy atomic cpu (ppc64) a barrier should be issued
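
Every hunk above makes the same mechanical change: the memory-barrier opcode constants are now qualified with Opcodes::. The following is a minimal, self-contained sketch of that pattern; the enum class Opcodes definition and the insert_mem_bar signature here are illustrative assumptions, not the actual HotSpot declarations.

// Illustrative sketch only -- assumed declarations, not the actual HotSpot code.
// It shows why call sites such as insert_mem_bar(Op_MemBarVolatile) must be
// rewritten as insert_mem_bar(Opcodes::Op_MemBarVolatile) once the Op_*
// constants live in a scoped enum: scoped enumerators are not visible unqualified.

#include <cstdio>

// Assumed shape of the scoped opcode enum.
enum class Opcodes : int {
  Op_MemBarAcquire,
  Op_MemBarRelease,
  Op_MemBarVolatile
};

// Hypothetical stand-in for Parse::insert_mem_bar(); the real method inserts
// a memory-barrier node into C2's ideal graph.
static void insert_mem_bar(Opcodes opcode) {
  std::printf("insert membar, opcode %d\n", static_cast<int>(opcode));
}

int main() {
  insert_mem_bar(Opcodes::Op_MemBarVolatile);  // StoreLoad barrier
  return 0;
}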