src/share/vm/opto/library_call.cpp

*** 26,35 ****
--- 26,36 ----
  #include "asm/macroAssembler.hpp"
  #include "classfile/systemDictionary.hpp"
  #include "classfile/vmSymbols.hpp"
  #include "compiler/compileBroker.hpp"
  #include "compiler/compileLog.hpp"
+ #include "gc/shenandoah/shenandoahRuntime.hpp"
  #include "oops/objArrayKlass.hpp"
  #include "opto/addnode.hpp"
  #include "opto/arraycopynode.hpp"
  #include "opto/c2compiler.hpp"
  #include "opto/callGenerator.hpp"
*** 44,53 ****
--- 45,55 ----
  #include "opto/mulnode.hpp"
  #include "opto/narrowptrnode.hpp"
  #include "opto/opaquenode.hpp"
  #include "opto/parse.hpp"
  #include "opto/runtime.hpp"
+ #include "opto/shenandoahSupport.hpp"
  #include "opto/subnode.hpp"
  #include "prims/nativeLookup.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "trace/traceMacros.hpp"
*** 970,982 ****
--- 972,994 ----
  }
  
  //------------------------------inline_string_equals------------------------
  bool LibraryCallKit::inline_string_equals() {
    Node* receiver = null_check_receiver();
+ 
+   if (ShenandoahVerifyReadsToFromSpace) {
+     receiver = shenandoah_read_barrier(receiver);
+   }
+ 
    // NOTE: Do not null check argument for String.equals() because spec
    // allows to specify NULL as argument.
    Node* argument = this->argument(1);
+ 
+   if (ShenandoahVerifyReadsToFromSpace) {
+     argument = shenandoah_read_barrier(argument);
+   }
+ 
    if (stopped()) {
      return true;
    }
  
    // paths (plus control) merge
*** 1021,1038 ****
--- 1033,1060 ----
    Node* no_ctrl = NULL;
  
    // Get start addr of receiver
    Node* receiver_val    = load_String_value(no_ctrl, receiver);
+ 
+   if (ShenandoahVerifyReadsToFromSpace) {
+     receiver_val = shenandoah_read_barrier(receiver_val);
+   }
+ 
    Node* receiver_offset = load_String_offset(no_ctrl, receiver);
    Node* receiver_start = array_element_address(receiver_val, receiver_offset, T_CHAR);
  
    // Get length of receiver
    Node* receiver_cnt  = load_String_length(no_ctrl, receiver);
  
    // Get start addr of argument
    Node* argument_val    = load_String_value(no_ctrl, argument);
+ 
+   if (ShenandoahVerifyReadsToFromSpace) {
+     argument_val = shenandoah_read_barrier(argument_val);
+   }
+ 
    Node* argument_offset = load_String_offset(no_ctrl, argument);
    Node* argument_start = array_element_address(argument_val, argument_offset, T_CHAR);
  
    // Get length of argument
    Node* argument_cnt  = load_String_length(no_ctrl, argument);
*** 1065,1074 ****
--- 1087,1100 ----
  
  //------------------------------inline_array_equals----------------------------
  bool LibraryCallKit::inline_array_equals() {
    Node* arg1 = argument(0);
    Node* arg2 = argument(1);
+ 
+   arg1 = shenandoah_read_barrier(arg1);
+   arg2 = shenandoah_read_barrier(arg2);
+ 
    set_result(_gvn.transform(new AryEqNode(control(), memory(TypeAryPtr::CHARS), arg1, arg2)));
    return true;
  }
  
  // Java version of String.indexOf(constant string)
*** 2150,2160 ****
    // is enabled, we need to log the value in the referent field in an SATB buffer.
    // This routine performs some compile time filters and generates suitable
    // runtime filters that guard the pre-barrier code.
    // Also add memory barrier for non volatile load from the referent field
    // to prevent commoning of loads across safepoint.
!   if (!UseG1GC && !need_mem_bar)
      return;
  
    // Some compile time checks.
  
    // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
--- 2176,2186 ----
    // is enabled, we need to log the value in the referent field in an SATB buffer.
    // This routine performs some compile time filters and generates suitable
    // runtime filters that guard the pre-barrier code.
    // Also add memory barrier for non volatile load from the referent field
    // to prevent commoning of loads across safepoint.
!   if (!(UseG1GC || UseShenandoahGC) && !need_mem_bar)
      return;
  
    // Some compile time checks.
  
    // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
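Like G1, Shenandoah marks concurrently with snapshot-at-the-beginning (SATB) semantics, which is why the compile-time filter above now also fires for UseShenandoahGC: a Reference.get() intrinsic must log the loaded referent so concurrent marking does not lose it. A minimal sketch of the guarded load this code generates, with hypothetical helper names standing in for the emitted IR:

    #include <cstddef>

    // Hypothetical stand-ins; the real intrinsic emits Ideal graph nodes.
    struct oop { void* fields[16]; };
    const size_t REFERENT_SLOT = 1;      // assumed slot of Reference.referent
    void satb_enqueue(void* referent);   // assumed SATB buffer hook

    void* load_with_referent_pre_barrier(oop* base, size_t slot,
                                         bool base_is_reference) {
      void* value = base->fields[slot];
      // Runtime filter: only a non-null referent loaded from a Reference
      // needs to be logged in the SATB buffer.
      if (slot == REFERENT_SLOT && base_is_reference && value != NULL) {
        satb_enqueue(value);  // keep the referent alive for concurrent marking
      }
      return value;
    }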
*** 2339,2348 ****
--- 2365,2379 ----
    Node* val;
  
    if (!is_native_ptr) {
      // The base is either a Java object or a value produced by Unsafe.staticFieldBase
      Node* base = argument(1);  // type: oop
+     if (is_store) {
+       base = shenandoah_write_barrier(base);
+     } else {
+       base = shenandoah_read_barrier(base);
+     }
      // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
      offset = argument(2);  // type: long
      // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
      // to be plain byte offsets, which are also the same as those accepted
      // by oopDesc::field_base.
*** 2490,2499 ****
--- 2521,2531 ----
  
      MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
      if (type != T_OBJECT ) {
        (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile);
      } else {
+       val = shenandoah_read_barrier_nomem(val);
        // Possibly an oop being stored to Java heap or native memory
        if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
          // oop to Java heap.
          (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
        } else {
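Together, the two hunks above show the barrier discipline these changes apply to Unsafe accesses: the base object gets a write barrier before a store and a read barrier before a load, and an oop that is about to be stored gets a read barrier itself, so that only to-space references ever land in the heap. A compilable sketch of that convention, assuming Shenandoah's Brooks-style forwarding pointer (all names here are illustrative, not the VM's API):

    #include <cstddef>

    // Each object carries a forwarding pointer to its current copy.
    struct oop { oop* fwd; char payload[64]; };

    // Read barrier: follow the forwarding pointer to the up-to-date copy.
    inline oop* read_barrier(oop* obj) { return obj == NULL ? NULL : obj->fwd; }

    // Write barrier: assumed to copy the object to to-space first if needed,
    // then return the writable copy.
    oop* write_barrier(oop* obj);

    // Unsafe.putObject sketch: resolve the destination for writing, resolve
    // the stored value for reading, then perform the raw store.
    void unsafe_put_object(oop* base, long offset, oop* val) {
      base = write_barrier(base);   // store must hit the to-space copy
      val  = read_barrier(val);     // never store a from-space pointer
      *(oop**)(base->payload + offset) = val;
    }

    // Unsafe.getObject sketch: loads only need the read barrier.
    oop* unsafe_get_object(oop* base, long offset) {
      base = read_barrier(base);
      return *(oop**)(base->payload + offset);
    }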
*** 2619,2628 ****
--- 2651,2662 ----
    receiver = null_check(receiver);
    if (stopped()) {
      return true;
    }
  
+   base = shenandoah_write_barrier(base);
+ 
    // Build field offset expression.
    // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
    // to be plain byte offsets, which are also the same as those accepted
    // by oopDesc::field_base.
    assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
*** 2660,2669 ****
--- 2694,2704 ----
    Node *mem = memory(alias_idx);
  
    // For now, we handle only those cases that actually exist: ints,
    // longs, and Object. Adding others should be straightforward.
    Node* load_store;
+   Node* result;
    switch(type) {
    case T_INT:
      if (kind == LS_xadd) {
        load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
      } else if (kind == LS_xchg) {
*** 2671,2680 ****
--- 2706,2716 ----
      } else if (kind == LS_cmpxchg) {
        load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval));
      } else {
        ShouldNotReachHere();
      }
+     result = load_store;
      break;
    case T_LONG:
      if (kind == LS_xadd) {
        load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
      } else if (kind == LS_xchg) {
*** 2682,2699 ****
--- 2718,2738 ----
      } else if (kind == LS_cmpxchg) {
        load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval));
      } else {
        ShouldNotReachHere();
      }
+     result = load_store;
      break;
    case T_OBJECT:
      // Transformation of a value which could be NULL pointer (CastPP #NULL)
      // could be delayed during Parse (for example, in adjust_map_after_if()).
      // Execute transformation here to avoid barrier generation in such case.
      if (_gvn.type(newval) == TypePtr::NULL_PTR)
        newval = _gvn.makecon(TypePtr::NULL_PTR);
  
+     newval = shenandoah_read_barrier_nomem(newval);
+ 
      // Reference stores need a store barrier.
      if (kind == LS_xchg) {
        // If pre-barrier must execute before the oop store, old value will require do_load here.
        if (!can_move_pre_barrier()) {
          pre_barrier(true /* do_load*/,
*** 2725,2759 ****
        assert(kind == LS_cmpxchg, "wrong LoadStore operation");
        Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
        load_store = _gvn.transform(new CompareAndSwapNNode(control(), mem, adr, newval_enc, oldval_enc));
      }
    } else
  #endif
    {
      if (kind == LS_xchg) {
        load_store = _gvn.transform(new GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
      } else {
        assert(kind == LS_cmpxchg, "wrong LoadStore operation");
        load_store = _gvn.transform(new CompareAndSwapPNode(control(), mem, adr, newval, oldval));
      }
    }
  
    if (kind == LS_cmpxchg) {
      // Emit the post barrier only when the actual store happened.
      // This makes sense to check only for compareAndSet that can fail to set the value.
      // CAS success path is marked more likely since we anticipate this is a performance
      // critical path, while CAS failure path can use the penalty for going through unlikely
      // path as backoff. Which is still better than doing a store barrier there.
      IdealKit ideal(this);
!     ideal.if_then(load_store, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
        sync_kit(ideal);
!       post_barrier(ideal.ctrl(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
        ideal.sync_kit(this);
      } ideal.end_if();
      final_sync(ideal);
    } else {
!     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
    }
    break;
  default:
    fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
    break;
--- 2764,2861 ----
        assert(kind == LS_cmpxchg, "wrong LoadStore operation");
        Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
        load_store = _gvn.transform(new CompareAndSwapNNode(control(), mem, adr, newval_enc, oldval_enc));
      }
+     result = load_store;
    } else
  #endif
    {
      if (kind == LS_xchg) {
        load_store = _gvn.transform(new GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
+       result = load_store;
      } else {
        assert(kind == LS_cmpxchg, "wrong LoadStore operation");
        load_store = _gvn.transform(new CompareAndSwapPNode(control(), mem, adr, newval, oldval));
+       result = load_store;
+ 
+       if (UseShenandoahGC) {
+         // if (! success)
+         Node* cmp_true = _gvn.transform(new CmpINode(load_store, intcon(1)));
+         Node* tst_true = _gvn.transform(new BoolNode(cmp_true, BoolTest::eq));
+         IfNode* iff = create_and_map_if(control(), tst_true, PROB_LIKELY_MAG(2), COUNT_UNKNOWN);
+         Node* iftrue = _gvn.transform(new IfTrueNode(iff));
+         Node* iffalse = _gvn.transform(new IfFalseNode(iff));
+ 
+         enum { _success_path = 1, _fail_path, _shenandoah_path, PATH_LIMIT };
+         RegionNode* region = new RegionNode(PATH_LIMIT);
+         Node* phi = new PhiNode(region, TypeInt::BOOL);
+         // success -> return result of CAS1.
+         region->init_req(_success_path, iftrue);
+         phi   ->init_req(_success_path, load_store);
+ 
+         // failure
+         set_control(iffalse);
+ 
+         // if (read_barrier(expected) == read_barrier(old))
+         oldval = shenandoah_read_barrier(oldval);
+ 
+         // Load old value from memory. We should really use what we get back from the CAS,
+         // if we can.
+         Node* current = make_load(control(), adr, TypeInstPtr::BOTTOM, type, MemNode::unordered);
+         // read_barrier(old)
+         Node* new_current = shenandoah_read_barrier(current);
+ 
+         Node* chk = _gvn.transform(new CmpPNode(new_current, oldval));
+         Node* test = _gvn.transform(new BoolNode(chk, BoolTest::eq));
+ 
+         IfNode* iff2 = create_and_map_if(control(), test, PROB_UNLIKELY_MAG(2), COUNT_UNKNOWN);
+         Node* iftrue2 = _gvn.transform(new IfTrueNode(iff2));
+         Node* iffalse2 = _gvn.transform(new IfFalseNode(iff2));
+ 
+         // If they are not equal, it's a legitimate failure and we return the result of CAS1.
+         region->init_req(_fail_path, iffalse2);
+         phi   ->init_req(_fail_path, load_store);
+ 
+         // Otherwise we retry with old.
+         set_control(iftrue2);
+ 
+         Node *call = make_runtime_call(RC_LEAF | RC_NO_IO,
+                                        OptoRuntime::shenandoah_cas_obj_Type(),
+                                        CAST_FROM_FN_PTR(address, ShenandoahRuntime::compare_and_swap_object),
+                                        "shenandoah_cas_obj",
+                                        NULL,
+                                        adr, newval, current);
+ 
+         Node* retval = _gvn.transform(new ProjNode(call, TypeFunc::Parms + 0));
+ 
+         region->init_req(_shenandoah_path, control());
+         phi   ->init_req(_shenandoah_path, retval);
+ 
+         set_control(_gvn.transform(region));
+         record_for_igvn(region);
+         phi = _gvn.transform(phi);
+         result = phi;
+       }
+     }
    }
  
    if (kind == LS_cmpxchg) {
      // Emit the post barrier only when the actual store happened.
      // This makes sense to check only for compareAndSet that can fail to set the value.
      // CAS success path is marked more likely since we anticipate this is a performance
      // critical path, while CAS failure path can use the penalty for going through unlikely
      // path as backoff. Which is still better than doing a store barrier there.
      IdealKit ideal(this);
!     ideal.if_then(result, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
        sync_kit(ideal);
!       post_barrier(ideal.ctrl(), result, base, adr, alias_idx, newval, T_OBJECT, true);
        ideal.sync_kit(this);
      } ideal.end_if();
      final_sync(ideal);
    } else {
!     post_barrier(control(), result, base, adr, alias_idx, newval, T_OBJECT, true);
    }
    break;
  default:
    fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
    break;
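The false-failure handling above exists because a Shenandoah object can briefly have two physical copies, so a CAS may fail merely because the field still holds the from-space copy of the expected value. A compilable sketch of the decision logic, assuming Brooks forwarding pointers (types and the runtime fallback are illustrative; unlike the IR above, which re-loads the field, this sketch reuses the value the CAS hands back):

    #include <atomic>
    #include <cstddef>

    struct oop { oop* fwd; };   // forwarding pointer to the current copy
    inline oop* read_barrier(oop* o) { return o == NULL ? NULL : o->fwd; }

    // Assumed runtime fallback: resolves forwarding and retries the CAS.
    bool shenandoah_cas_obj_slow(std::atomic<oop*>* addr, oop* newval, oop* seen);

    bool cas_obj(std::atomic<oop*>* addr, oop* expected, oop* newval) {
      oop* seen = expected;
      if (addr->compare_exchange_strong(seen, newval)) {
        return true;                                // CAS1 succeeded
      }
      // CAS1 failed; 'seen' holds the current field value. If the resolved
      // copies differ, the failure is legitimate.
      if (read_barrier(seen) != read_barrier(expected)) {
        return false;
      }
      // Same logical object, different copy: retry via the runtime.
      return shenandoah_cas_obj_slow(addr, newval, seen);
    }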
*** 2766,2795 ****
    set_memory(proj, alias_idx);
  
    if (type == T_OBJECT && kind == LS_xchg) {
  #ifdef _LP64
      if (adr->bottom_type()->is_ptr_to_narrowoop()) {
!       load_store = _gvn.transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
      }
  #endif
      if (can_move_pre_barrier()) {
        // Don't need to load pre_val. The old value is returned by load_store.
        // The pre_barrier can execute after the xchg as long as no safepoint
        // gets inserted between them.
        pre_barrier(false /* do_load */,
                    control(), NULL, NULL, max_juint, NULL, NULL,
!                   load_store /* pre_val */, T_OBJECT);
      }
    }
  
    // Add the trailing membar surrounding the access
    insert_mem_bar(Op_MemBarCPUOrder);
    insert_mem_bar(Op_MemBarAcquire);
  
!   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
!   set_result(load_store);
    return true;
  }
  
  //----------------------------inline_unsafe_ordered_store----------------------
  // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
--- 2868,2897 ----
    set_memory(proj, alias_idx);
  
    if (type == T_OBJECT && kind == LS_xchg) {
  #ifdef _LP64
      if (adr->bottom_type()->is_ptr_to_narrowoop()) {
!       result = _gvn.transform(new DecodeNNode(result, result->get_ptr_type()));
      }
  #endif
      if (can_move_pre_barrier()) {
        // Don't need to load pre_val. The old value is returned by load_store.
        // The pre_barrier can execute after the xchg as long as no safepoint
        // gets inserted between them.
        pre_barrier(false /* do_load */,
                    control(), NULL, NULL, max_juint, NULL, NULL,
!                   result /* pre_val */, T_OBJECT);
      }
    }
  
    // Add the trailing membar surrounding the access
    insert_mem_bar(Op_MemBarCPUOrder);
    insert_mem_bar(Op_MemBarAcquire);
  
!   assert(type2size[result->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
!   set_result(result);
    return true;
  }
  
  //----------------------------inline_unsafe_ordered_store----------------------
  // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
*** 2829,2838 ****
--- 2931,2942 ----
    receiver = null_check(receiver);
    if (stopped()) {
      return true;
    }
  
+   base = shenandoah_write_barrier(base);
+ 
    // Build field offset expression.
    assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
    // 32-bit machines ignore the high half of long offsets
    offset = ConvL2X(offset);
    Node* adr = make_unsafe_address(base, offset);
*** 2843,2854 ****
    insert_mem_bar(Op_MemBarRelease);
    insert_mem_bar(Op_MemBarCPUOrder);
    // Ensure that the store is atomic for longs:
    const bool require_atomic_access = true;
    Node* store;
!   if (type == T_OBJECT) // reference stores need a store barrier.
      store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
    else {
      store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
    }
    insert_mem_bar(Op_MemBarCPUOrder);
    return true;
--- 2947,2960 ----
    insert_mem_bar(Op_MemBarRelease);
    insert_mem_bar(Op_MemBarCPUOrder);
    // Ensure that the store is atomic for longs:
    const bool require_atomic_access = true;
    Node* store;
!   if (type == T_OBJECT) { // reference stores need a store barrier.
!     val = shenandoah_read_barrier_nomem(val);
      store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
+   } else {
      store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
    }
    insert_mem_bar(Op_MemBarCPUOrder);
    return true;
*** 3166,3182 ****
--- 3272,3296 ----
    bool expect_prim = false;     // most of these guys expect to work on refs
  
    enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
  
    Node* mirror = argument(0);
+ 
+   if (ShenandoahVerifyReadsToFromSpace) {
+     mirror = shenandoah_read_barrier(mirror);
+   }
+ 
    Node* obj    = top();
  
    switch (id) {
    case vmIntrinsics::_isInstance:
      // nothing is an instance of a primitive type
      prim_return_value = intcon(0);
      obj = argument(1);
+     if (ShenandoahVerifyReadsToFromSpace) {
+       obj = shenandoah_read_barrier(obj);
+     }
      break;
    case vmIntrinsics::_getModifiers:
      prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
      assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line");
      return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin);
*** 3420,3429 ****
--- 3534,3544 ----
  bool LibraryCallKit::inline_native_subtype_check() {
    // Pull both arguments off the stack.
    Node* args[2];                   // two java.lang.Class mirrors: superc, subc
    args[0] = argument(0);
    args[1] = argument(1);
+ 
    Node* klasses[2];  // corresponding Klasses: superk, subk
    klasses[0] = klasses[1] = top();
  
    enum {
      // A full decision tree on {superc is prim, subc is prim}:
*** 3482,3491 ****
--- 3597,3607 ----
    // we must return true when they are identical primitives.
    // It is convenient to test this after the first null klass check.
    set_control(region->in(_prim_0_path)); // go back to first null check
    if (!stopped()) {
      // Since superc is primitive, make a guard for the superc==subc case.
+     shenandoah_acmp_barrier(args[0], args[1]);
      Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
      Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
      generate_guard(bol_eq, region, PROB_FAIR);
      if (region->req() == PATH_LIMIT+1) {
        // A guard was added.  If the added guard is taken, superc==subc.
*** 3726,3735 ****
--- 3842,3853 ----
    // How many elements will we copy from the original?
    // The answer is MinI(orig_length - start, length).
    Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
    Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
  
+   original = shenandoah_read_barrier(original);
+ 
    // Generate a direct call to the right arraycopy function(s).
    // We know the copy is disjoint but we might not know if the
    // oop stores need checking.
    // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
    // This will fail a store-check if x contains any non-nulls.
*** 3907,3916 ****
--- 4025,4038 ----
      obj = null_check_oop(obj, &null_ctl);
      result_reg->init_req(_null_path, null_ctl);
      result_val->init_req(_null_path, _gvn.intcon(0));
    }
  
+   if (ShenandoahVerifyReadsToFromSpace) {
+     obj = shenandoah_read_barrier(obj);
+   }
+ 
    // Unconditionally null?  Then return right away.
    if (stopped()) {
      set_control( result_reg->in(_null_path));
      if (!stopped())
        set_result(result_val->in(_null_path));
*** 4222,4231 ****
--- 4344,4356 ----
    Node* size    = ConvL2X(argument(7));  // type: long
  
    assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
  
+   src_ptr = shenandoah_read_barrier(src_ptr);
+   dst_ptr = shenandoah_write_barrier(dst_ptr);
+ 
    Node* src = make_unsafe_address(src_ptr, src_off);
    Node* dst = make_unsafe_address(dst_ptr, dst_off);
  
    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes of the copy source or destination float below the copy.
*** 4250,4259 ****
--- 4375,4386 ----
  void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
    assert(obj_size != NULL, "");
    Node* raw_obj = alloc_obj->in(1);
    assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
  
+   obj = shenandoah_read_barrier(obj);
+ 
    AllocateNode* alloc = NULL;
    if (ReduceBulkZeroing) {
      // We will be completely responsible for initializing this object -
      // mark Initialize node as complete.
      alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
*** 4307,4316 ****
--- 4434,4452 ----
      set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
    } else {
      set_all_memory(n);
    }
  
+   if (UseShenandoahGC) {
+     // Make sure that references in the cloned object are updated for Shenandoah.
+     make_runtime_call(RC_LEAF|RC_NO_FP,
+                       OptoRuntime::shenandoah_clone_barrier_Type(),
+                       CAST_FROM_FN_PTR(address, SharedRuntime::shenandoah_clone_barrier),
+                       "shenandoah_clone_barrier", TypePtr::BOTTOM,
+                       alloc_obj);
+   }
+ 
    // If necessary, emit some card marks afterwards.  (Non-arrays only.)
    if (card_mark) {
      assert(!is_array, "");
      // Put in store barrier for any and all oops we are sticking
      // into this object.  (We could avoid this if we could prove
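The clone intrinsic copies the object's payload bit-for-bit, so any oop fields it copies may still point at from-space; the leaf call above fixes the clone up afterwards. A sketch of what such a barrier is assumed to do, with the clone's reference fields simplified to an array of slots (all names illustrative):

    #include <cstddef>

    struct oop { oop* fwd; oop* refs[4]; size_t nrefs; };
    inline oop* read_barrier(oop* o) { return o == NULL ? NULL : o->fwd; }

    // Assumed behavior of the shenandoah_clone_barrier runtime entry:
    // resolve every reference the clone inherited so none points to a
    // stale from-space copy.
    void shenandoah_clone_barrier(oop* clone) {
      for (size_t i = 0; i < clone->nrefs; i++) {
        clone->refs[i] = read_barrier(clone->refs[i]);
      }
    }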
*** 4433,4442 ****
--- 4569,4581 ----
    // because card marking is required on each card of the array.
    Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
    if (is_obja != NULL) {
      PreserveJVMState pjvms2(this);
      set_control(is_obja);
+ 
+     obj = shenandoah_read_barrier(obj);
+ 
      // Generate a direct call to the right arraycopy function(s).
      Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
      ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL);
      ac->set_cloneoop();
      Node* n = _gvn.transform(ac);
*** 4681,4691 ****
    Node* src_offset  = argument(1);   // type: int
    Node* dest        = argument(2);   // type: oop
    Node* dest_offset = argument(3);   // type: int
    Node* length      = argument(4);   // type: int
- 
    // Check for allocation before we add nodes that would confuse
    // tightly_coupled_allocation()
    AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
  
    int saved_reexecute_sp = -1;
--- 4820,4829 ----
*** 4891,4900 ****
--- 5029,5041 ----
  
    if (stopped()) {
      return true;
    }
  
+   src = shenandoah_read_barrier(src);
+   dest = shenandoah_write_barrier(dest);
+ 
    ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL,
                                            // Create LoadRange and LoadKlass nodes for use during macro expansion here
                                            // so the compiler has a chance to eliminate them: during macro expansion,
                                            // we have to set their control (CastPP nodes are eliminated).
                                            load_object_klass(src), load_object_klass(dest),
*** 4920,4929 ****
--- 5061,5072 ----
  LibraryCallKit::tightly_coupled_allocation(Node* ptr, RegionNode* slow_region) {
    if (stopped())             return NULL;  // no fast path
    if (C->AliasLevel() == 0)  return NULL;  // no MergeMems around
  
+   ptr = ShenandoahBarrierNode::skip_through_barrier(ptr);
+ 
    AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
    if (alloc == NULL)  return NULL;
  
    Node* rawmem = memory(Compile::AliasIdxRaw);
    // Is the allocation's memory state untouched?
*** 4999,5008 ****
--- 5142,5154 ----
    Node *src_offset = argument(1);
    Node *dst = argument(2);
    Node *dst_offset = argument(3);
    Node *length = argument(4);
  
+   src = shenandoah_read_barrier(src);
+   dst = shenandoah_write_barrier(dst);
+ 
    const Type* src_type = src->Value(&_gvn);
    const Type* dst_type = dst->Value(&_gvn);
    const TypeAryPtr* top_src = src_type->isa_aryptr();
    const TypeAryPtr* top_dest = dst_type->isa_aryptr();
    if (top_src  == NULL || top_src->klass()  == NULL ||
*** 5048,5057 ****
--- 5194,5207 ----
    Node* xlen = argument(1);
    Node* y    = argument(2);
    Node* ylen = argument(3);
    Node* z    = argument(4);
  
+   x = shenandoah_read_barrier(x);
+   y = shenandoah_read_barrier(y);
+   z = shenandoah_write_barrier(z);
+ 
    const Type* x_type = x->Value(&_gvn);
    const Type* y_type = y->Value(&_gvn);
    const TypeAryPtr* top_x = x_type->isa_aryptr();
    const TypeAryPtr* top_y = y_type->isa_aryptr();
    if (top_x  == NULL || top_x->klass()  == NULL ||
*** 5148,5157 ****
--- 5298,5310 ----
    Node* x    = argument(0);
    Node* len  = argument(1);
    Node* z    = argument(2);
    Node* zlen = argument(3);
  
+   x = shenandoah_read_barrier(x);
+   z = shenandoah_write_barrier(z);
+ 
    const Type* x_type = x->Value(&_gvn);
    const Type* z_type = z->Value(&_gvn);
    const TypeAryPtr* top_x = x_type->isa_aryptr();
    const TypeAryPtr* top_z = z_type->isa_aryptr();
    if (top_x  == NULL || top_x->klass()  == NULL ||
*** 5195,5204 ****
--- 5348,5360 ----
    Node* in     = argument(1);
    Node* offset = argument(2);
    Node* len    = argument(3);
    Node* k      = argument(4);
  
+   in = shenandoah_read_barrier(in);
+   out = shenandoah_write_barrier(out);
+ 
    const Type* out_type = out->Value(&_gvn);
    const Type* in_type = in->Value(&_gvn);
    const TypeAryPtr* top_out = out_type->isa_aryptr();
    const TypeAryPtr* top_in = in_type->isa_aryptr();
    if (top_out  == NULL || top_out->klass()  == NULL ||
*** 5244,5253 ****
--- 5400,5414 ----
    Node* n    = argument(2);
    Node* len  = argument(3);
    Node* inv  = argument(4);
    Node* m    = argument(6);
  
+   a = shenandoah_read_barrier(a);
+   b = shenandoah_read_barrier(b);
+   n = shenandoah_read_barrier(n);
+   m = shenandoah_write_barrier(m);
+ 
    const Type* a_type = a->Value(&_gvn);
    const TypeAryPtr* top_a = a_type->isa_aryptr();
    const Type* b_type = b->Value(&_gvn);
    const TypeAryPtr* top_b = b_type->isa_aryptr();
    const Type* n_type = a->Value(&_gvn);
*** 5303,5312 ****
--- 5464,5477 ----
    Node* n    = argument(1);
    Node* len  = argument(2);
    Node* inv  = argument(3);
    Node* m    = argument(5);
  
+   a = shenandoah_read_barrier(a);
+   n = shenandoah_read_barrier(n);
+   m = shenandoah_write_barrier(m);
+ 
    const Type* a_type = a->Value(&_gvn);
    const TypeAryPtr* top_a = a_type->isa_aryptr();
    const Type* n_type = a->Value(&_gvn);
    const TypeAryPtr* top_n = n_type->isa_aryptr();
    const Type* m_type = a->Value(&_gvn);
*** 5389,5398 ****
--- 5554,5565 ----
    Node* crc     = argument(0); // type: int
    Node* src     = argument(1); // type: oop
    Node* offset  = argument(2); // type: int
    Node* length  = argument(3); // type: int
  
+   src = shenandoah_read_barrier(src);
+ 
    const Type* src_type = src->Value(&_gvn);
    const TypeAryPtr* top_src = src_type->isa_aryptr();
    if (top_src  == NULL || top_src->klass()  == NULL) {
      // failed array check
      return false;
*** 5491,5504 ****
--- 5658,5673 ----
    if (src_elem != T_BYTE) {
      return false;
    }
  
    // 'src_start' points to src array + scaled offset
+   src = shenandoah_read_barrier(src);
    Node* src_start = array_element_address(src, offset, src_elem);
  
    // static final int[] byteTable in class CRC32C
    Node* table = get_table_from_crc32c_class(callee()->holder());
+   table = shenandoah_read_barrier(table);
    Node* table_start = array_element_address(table, intcon(0), T_INT);
  
    // We assume that range check is done by caller.
    // TODO: generate range check (offset+length < src.length) in debug VM.
*** 5538,5547 ****
--- 5707,5717 ----
    // 'src_start' points to src array + scaled offset
    Node* src_start = basic_plus_adr(top(), base, offset);
  
    // static final int[] byteTable in class CRC32C
    Node* table = get_table_from_crc32c_class(callee()->holder());
+   table = shenandoah_read_barrier(table);
    Node* table_start = array_element_address(table, intcon(0), T_INT);
  
    // Call the stub.
    address stubAddr = StubRoutines::updateBytesCRC32C();
    const char *stubName = "updateBytesCRC32C";
*** 5581,5590 ****
--- 5751,5761 ----
    if (src_elem != T_BYTE) {
      return false;
    }
  
    // 'src_start' points to src array + scaled offset
+   src = shenandoah_read_barrier(src);
    Node* src_start = array_element_address(src, offset, src_elem);
  
    // We assume that range check is done by caller.
    // TODO: generate range check (offset+length < src.length) in debug VM.
*** 5643,5652 ****
--- 5814,5827 ----
    // Get the argument:
    Node* reference_obj = null_check_receiver();
    if (stopped()) return true;
  
+   if (ShenandoahVerifyReadsToFromSpace) {
+     reference_obj = shenandoah_read_barrier(reference_obj);
+   }
+ 
    Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
  
    ciInstanceKlass* klass = env()->Object_klass();
    const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
*** 5691,5700 ****
--- 5866,5877 ----
    if (is_static) {
      const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
      fromObj = makecon(tip);
    }
  
+   fromObj = shenandoah_read_barrier(fromObj);
+ 
    // Next code copied from Parse::do_get_xxx():
  
    // Compute address and memory type.
    int offset = field->offset_in_bytes();
    bool is_vol = field->is_volatile();
*** 5751,5760 ****
--- 5928,5941 ----
    Node* src         = argument(1);
    Node* src_offset  = argument(2);
    Node* dest        = argument(3);
    Node* dest_offset = argument(4);
  
+   // Resolve src and dest arrays for ShenandoahGC.
+   src = shenandoah_read_barrier(src);
+   dest = shenandoah_write_barrier(dest);
+ 
    // (1) src and dest are arrays.
    const Type* src_type = src->Value(&_gvn);
    const Type* dest_type = dest->Value(&_gvn);
    const TypeAryPtr* top_src = src_type->isa_aryptr();
    const TypeAryPtr* top_dest = dest_type->isa_aryptr();
*** 5819,5828 ****
--- 6000,6013 ----
    Node* src_offset  = argument(2);
    Node* len         = argument(3);
    Node* dest        = argument(4);
    Node* dest_offset = argument(5);
  
+   // Resolve src and dest arrays for ShenandoahGC.
+   src = shenandoah_read_barrier(src);
+   dest = shenandoah_write_barrier(dest);
+ 
    // (1) src and dest are arrays.
    const Type* src_type = src->Value(&_gvn);
    const Type* dest_type = dest->Value(&_gvn);
    const TypeAryPtr* top_src = src_type->isa_aryptr();
    const TypeAryPtr* top_dest = dest_type->isa_aryptr();
*** 5863,5872 ****
--- 6048,6060 ----
    Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
    if (k_start == NULL) return false;
  
    // similarly, get the start address of the r vector
    Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B", /*is_exact*/ false);
+ 
+   objRvec = shenandoah_write_barrier(objRvec);
+ 
    if (objRvec == NULL) return false;
    Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
  
    Node* cbcCrypt;
    if (Matcher::pass_original_key_for_aes()) {
*** 5898,5907 ****
--- 6086,6097 ----
  Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) {
    Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I", /*is_exact*/ false);
    assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
    if (objAESCryptKey == NULL) return (Node *) NULL;
  
+   objAESCryptKey = shenandoah_read_barrier(objAESCryptKey);
+ 
    // now have the array, need to get the start address of the K array
    Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
    return k_start;
  }