src/share/vm/opto/library_call.cpp

rev 5102 : opto: Optimize g1 pre_barrier in library_call.

*** 2753,2766 ****
      // Execute transformation here to avoid barrier generation in such case.
      if (_gvn.type(newval) == TypePtr::NULL_PTR)
        newval = _gvn.makecon(TypePtr::NULL_PTR);

      // Reference stores need a store barrier.
!     pre_barrier(true /* do_load*/,
!                 control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
!                 NULL /* pre_val*/,
!                 T_OBJECT);
  #ifdef _LP64
      if (adr->bottom_type()->is_ptr_to_narrowoop()) {
        Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
        if (kind == LS_xchg) {
          load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
--- 2753,2771 ----
      // Execute transformation here to avoid barrier generation in such case.
      if (_gvn.type(newval) == TypePtr::NULL_PTR)
        newval = _gvn.makecon(TypePtr::NULL_PTR);

      // Reference stores need a store barrier.
!     if (kind == LS_cmpxchg) {
!       // The only known value which might get overwritten is oldval.
!       pre_barrier(false /* do_load */,
!                   control(), NULL, NULL, max_juint, NULL, NULL,
!                   oldval /* pre_val */,
!                   T_OBJECT);
+     }
+     // LS_xchg: see below.
+
  #ifdef _LP64
      if (adr->bottom_type()->is_ptr_to_narrowoop()) {
        Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
        if (kind == LS_xchg) {
          load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
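
The observation behind this hunk is that G1's SATB (snapshot-at-the-beginning) pre-barrier only needs the reference that is about to be overwritten, and for compareAndSwapObject the only value that can be overwritten is oldval, so the barrier can be fed oldval directly with do_load=false instead of re-loading the field. A minimal sketch of what the emitted barrier does at runtime, with purely illustrative names (g1_satb_pre_barrier, marking_is_active, and enqueue_for_remark are not HotSpot identifiers):

    // Illustrative sketch only -- not HotSpot code. Shows the runtime effect
    // of the barrier that pre_barrier() emits: while concurrent marking is
    // active, the reference about to be overwritten is logged so the collector
    // still sees the object graph as it was when marking started.
    typedef void* oop;                      // placeholder for an object reference

    extern bool marking_is_active();        // hypothetical: is SATB marking on?
    extern void enqueue_for_remark(oop v);  // hypothetical: push onto the SATB queue

    static void g1_satb_pre_barrier(oop pre_val) {
      if (marking_is_active() && pre_val != NULL) {
        enqueue_for_remark(pre_val);        // record the overwritten reference
      }
    }
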
*** 2792,2811 ****
    // SCMemProjNodes represent the memory state of a LoadStore. Their
    // main role is to prevent LoadStore nodes from being optimized away
    // when their results aren't used.
    Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
    set_memory(proj, alias_idx);

-   // Add the trailing membar surrounding the access
-   insert_mem_bar(Op_MemBarCPUOrder);
-   insert_mem_bar(Op_MemBarAcquire);
-
  #ifdef _LP64
    if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
      load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
    }
  #endif

    assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
    set_result(load_store);
    return true;
  }
--- 2797,2826 ----
    // SCMemProjNodes represent the memory state of a LoadStore. Their
    // main role is to prevent LoadStore nodes from being optimized away
    // when their results aren't used.
    Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
    set_memory(proj, alias_idx);

  #ifdef _LP64
    if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
      load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
    }
  #endif

+   // G1: Don't need to load pre_val. The old value is returned by load_store.
+   // The pre_barrier can execute after the xchg as long as no safepoint
+   // gets inserted between them.
+   if (type == T_OBJECT && kind == LS_xchg) {
+     pre_barrier(false /* do_load */,
+                 control(), NULL, NULL, max_juint, NULL, NULL,
+                 load_store /* pre_val */,
+                 T_OBJECT);
+   }
+
+   // Add the trailing membar surrounding the access
+   insert_mem_bar(Op_MemBarCPUOrder);
+   insert_mem_bar(Op_MemBarAcquire);
+
    assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
    set_result(load_store);
    return true;
  }
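
For the xchg case, the atomic exchange already returns the previous field contents, so the pre-barrier can consume the load_store result instead of issuing its own load, provided no safepoint can separate the exchange from the barrier (the condition stated in the new comment). A conceptual sketch under the same illustrative naming as above (atomic_exchange and get_and_set_oop are placeholders, not HotSpot entry points):

    // Conceptual runtime shape of getAndSetObject after this change; every
    // name below is illustrative (see the sketch after the previous hunk for
    // g1_satb_pre_barrier).
    typedef void* oop;

    extern oop  atomic_exchange(oop* field, oop newval);  // returns the old value
    extern void g1_satb_pre_barrier(oop pre_val);         // SATB logging, as above

    static oop get_and_set_oop(oop* field, oop newval) {
      oop old = atomic_exchange(field, newval);  // previous value comes back for free
      // Valid only because no safepoint can be inserted between the exchange
      // and this barrier, as the comment in the hunk above requires.
      g1_satb_pre_barrier(old);                  // no separate pre-load of *field
      return old;
    }

Relocating the barrier after the exchange is also why the trailing MemBarCPUOrder/MemBarAcquire pair moves below the new pre_barrier call in this hunk.
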