116
117 ciMethod* caller() const { return jvms()->method(); }
118 int bci() const { return jvms()->bci(); }
119 LibraryIntrinsic* intrinsic() const { return _intrinsic; }
120 vmIntrinsics::ID intrinsic_id() const { return _intrinsic->intrinsic_id(); }
121 ciMethod* callee() const { return _intrinsic->method(); }
122
123 bool try_to_inline(int predicate);
124 Node* try_to_predicate(int predicate);
125
126 void push_result() {
127 // Push the result onto the stack.
128 if (!stopped() && result() != NULL) {
129 BasicType bt = result()->bottom_type()->basic_type();
130 push_node(bt, result());
131 }
132 }
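  // (Usage sketch, assuming the usual LibraryIntrinsic::generate() flow: after a
  //  successful try_to_inline() the framework calls push_result() so the value
  //  produced by the intrinsic replaces what the original bytecode would have
  //  pushed; a stopped() map or a NULL result means there is nothing to push.)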
133
134 private:
135 void fatal_unexpected_iid(vmIntrinsics::ID iid) {
136 fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
137 }
138
139 void set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; }
140 void set_result(RegionNode* region, PhiNode* value);
141 Node* result() { return _result; }
142
143 virtual int reexecute_sp() { return _reexecute_sp; }
144
145 // Helper functions to inline natives
146 Node* generate_guard(Node* test, RegionNode* region, float true_prob);
147 Node* generate_slow_guard(Node* test, RegionNode* region);
148 Node* generate_fair_guard(Node* test, RegionNode* region);
149 Node* generate_negative_guard(Node* index, RegionNode* region,
150 // resulting CastII of index:
151 Node* *pos_index = NULL);
152 Node* generate_limit_guard(Node* offset, Node* subseq_length,
153 Node* array_length,
154 RegionNode* region);
155 Node* generate_current_thread(Node* &tls_output);
156 Node* load_mirror_from_klass(Node* klass);
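  // (Guard helpers, summarized: generate_guard() emits an If on the current
  //  control with the given taken-probability, adds the taken projection to
  //  'region' (or returns it to the caller), and leaves control() on the
  //  not-taken, fast-path side.  The slow/fair variants are expected to be
  //  thin wrappers, roughly:
  //    Node* generate_slow_guard(Node* test, RegionNode* region) {
  //      return generate_guard(test, region, PROB_UNLIKELY_MAG(3)); // rarely taken
  //    }
  //    Node* generate_fair_guard(Node* test, RegionNode* region) {
  //      return generate_guard(test, region, PROB_FAIR);            // ~50/50
  //    }
  //  The exact probability constants are an assumption and may vary by version.)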
2449 case T_BOOLEAN:
2450 case T_CHAR:
2451 case T_BYTE:
2452 case T_SHORT:
2453 case T_INT:
2454 case T_LONG:
2455 case T_FLOAT:
2456 case T_DOUBLE:
2457 break;
2458 case T_OBJECT:
2459 if (need_read_barrier) {
2460 insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
2461 }
2462 break;
2463 case T_ADDRESS:
2464 // Cast to an int type.
2465 p = _gvn.transform(new CastP2XNode(NULL, p));
2466 p = ConvX2UL(p);
2467 break;
2468 default:
2469 fatal("unexpected type %d: %s", type, type2name(type));
2470 break;
2471 }
2472 }
2473 // The load node has the control of the preceding MemBarCPUOrder. All
2474 // following nodes will have the control of the MemBarCPUOrder inserted at
2475 // the end of this method. So, pushing the load onto the stack at a later
2476 // point is fine.
2477 set_result(p);
2478 } else {
2479 // place effect of store into memory
2480 switch (type) {
2481 case T_DOUBLE:
2482 val = dstore_rounding(val);
2483 break;
2484 case T_ADDRESS:
2485 // Repackage the long as a pointer.
2486 val = ConvL2X(val);
2487 val = _gvn.transform(new CastX2PNode(val));
2488 break;
2489 }
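      // (Context note: the T_ADDRESS cases above appear to correspond to the
      //  Unsafe getAddress/putAddress family, which traffics in raw native
      //  addresses as Java longs.  The load side casts the pointer to a machine
      //  word and zero-extends it to long (CastP2X + ConvX2UL); the store side
      //  repackages the long back into a pointer (ConvL2X + CastX2P) before the
      //  actual store is emitted.)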
2738 }
2739 }
2740 if (kind == LS_cmpxchg) {
2741 // Emit the post barrier only when the actual store happened.
2742 // This makes sense to check only for compareAndSet that can fail to set the value.
2743 // CAS success path is marked more likely since we anticipate this is a performance
2744 // critical path, while CAS failure path can use the penalty for going through unlikely
2745 // path as backoff. Which is still better than doing a store barrier there.
2746 IdealKit ideal(this);
2747 ideal.if_then(load_store, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
2748 sync_kit(ideal);
2749 post_barrier(ideal.ctrl(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2750 ideal.sync_kit(this);
2751 } ideal.end_if();
2752 final_sync(ideal);
2753 } else {
2754 post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2755 }
2756 break;
2757 default:
2758 fatal("unexpected type %d: %s", type, type2name(type));
2759 break;
2760 }
2761
2762 // SCMemProjNodes represent the memory state of a LoadStore. Their
2763 // main role is to prevent LoadStore nodes from being optimized away
2764 // when their results aren't used.
2765 Node* proj = _gvn.transform(new SCMemProjNode(load_store));
2766 set_memory(proj, alias_idx);
2767
2768 if (type == T_OBJECT && kind == LS_xchg) {
2769 #ifdef _LP64
2770 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2771 load_store = _gvn.transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
2772 }
2773 #endif
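      // (Note: with compressed oops the raw result of the object xchg is a narrow
      //  oop, so it is decoded back to a regular oop here before being used below
      //  as the pre-barrier's pre_val and as the intrinsic's result.)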
2774 if (can_move_pre_barrier()) {
2775 // Don't need to load pre_val. The old value is returned by load_store.
2776 // The pre_barrier can execute after the xchg as long as no safepoint
2777 // gets inserted between them.
2778 pre_barrier(false /* do_load */,
3790 }
3791 }
3792 }
3793 } // original reexecute is set back here
3794
3795 C->set_has_split_ifs(true); // Has chance for split-if optimization
3796 if (!stopped()) {
3797 set_result(newcopy);
3798 }
3799 return true;
3800 }
3801
3802
3803 //----------------------generate_virtual_guard---------------------------
3804 // Helper for hashCode and clone. Peeks inside the vtable to avoid a call.
3805 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
3806 RegionNode* slow_region) {
3807 ciMethod* method = callee();
3808 int vtable_index = method->vtable_index();
3809 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
3810 "bad index %d", vtable_index);
3811 // Get the Method* out of the appropriate vtable entry.
3812 int entry_offset = (InstanceKlass::vtable_start_offset() +
3813 vtable_index*vtableEntry::size()) * wordSize +
3814 vtableEntry::method_offset_in_bytes();
3815 Node* entry_addr = basic_plus_adr(obj_klass, entry_offset);
3816 Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3817
3818 // Compare the target method with the expected method (e.g., Object.hashCode).
3819 const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
3820
3821 Node* native_call = makecon(native_call_addr);
3822 Node* chk_native = _gvn.transform(new CmpPNode(target_call, native_call));
3823 Node* test_native = _gvn.transform(new BoolNode(chk_native, BoolTest::ne));
3824
3825 return generate_slow_guard(test_native, slow_region);
3826 }
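// (Usage sketch, assuming the typical hashCode/clone callers: they pass their
//  slow_region here, so when the receiver's vtable slot no longer holds the
//  expected Object method -- i.e. the method has been overridden -- the guard
//  routes control to the slow path and the fast-path code that follows is
//  skipped for that receiver.)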
3827
3828 //-----------------------generate_method_call----------------------------
3829 // Use generate_method_call to make a slow-call to the real
3830 // method if the fast path fails. An alternative would be to
3842 guarantee(method_id == method->intrinsic_id(), "must match");
3843
3844 const TypeFunc* tf = TypeFunc::make(method);
3845 CallJavaNode* slow_call;
3846 if (is_static) {
3847 assert(!is_virtual, "");
3848 slow_call = new CallStaticJavaNode(C, tf,
3849 SharedRuntime::get_resolve_static_call_stub(),
3850 method, bci());
3851 } else if (is_virtual) {
3852 null_check_receiver();
3853 int vtable_index = Method::invalid_vtable_index;
3854 if (UseInlineCaches) {
3855 // Suppress the vtable call
3856 } else {
3857 // hashCode and clone are not miranda methods,
3858 // so the vtable index is fixed.
3859 // No need to use the linkResolver to get it.
3860 vtable_index = method->vtable_index();
3861 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
3862 "bad index %d", vtable_index);
3863 }
3864 slow_call = new CallDynamicJavaNode(tf,
3865 SharedRuntime::get_resolve_virtual_call_stub(),
3866 method, vtable_index, bci());
3867 } else { // neither virtual nor static: opt_virtual
3868 null_check_receiver();
3869 slow_call = new CallStaticJavaNode(C, tf,
3870 SharedRuntime::get_resolve_opt_virtual_call_stub(),
3871 method, bci());
3872 slow_call->set_optimized_virtual(true);
3873 }
3874 set_arguments_for_java_call(slow_call);
3875 set_edges_for_java_call(slow_call);
3876 return slow_call;
3877 }
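// (Summary: depending on is_static/is_virtual the slow call is built as a
//  static call, a true virtual call (through the vtable, or an inline cache
//  when UseInlineCaches is on), or a statically bound "optimized virtual"
//  call.  In every case the ordinary SharedRuntime resolve stubs are used, so
//  the slow path behaves like the original invocation would have.)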
3878
3879
3880 /**
3881 * Build special case code for calls to hashCode on an object. This call may
3882 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
6114 stub_name = "sha1_implCompressMB";
6115 stub_addr = StubRoutines::sha1_implCompressMB();
6116 }
6117 break;
6118 case 1:
6119 if (UseSHA256Intrinsics) {
6120 klass_SHA_name = "sun/security/provider/SHA2";
6121 stub_name = "sha256_implCompressMB";
6122 stub_addr = StubRoutines::sha256_implCompressMB();
6123 }
6124 break;
6125 case 2:
6126 if (UseSHA512Intrinsics) {
6127 klass_SHA_name = "sun/security/provider/SHA5";
6128 stub_name = "sha512_implCompressMB";
6129 stub_addr = StubRoutines::sha512_implCompressMB();
6130 long_state = true;
6131 }
6132 break;
6133 default:
6134 fatal("unknown SHA intrinsic predicate: %d", predicate);
6135 }
6136 if (klass_SHA_name != NULL) {
6137 // get DigestBase klass to look up the SHA klass
6138 const TypeInstPtr* tinst = _gvn.type(digestBase_obj)->isa_instptr();
6139 assert(tinst != NULL, "digestBase_obj is not instance???");
6140 assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");
6141
6142 ciKlass* klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name));
6143 assert(klass_SHA->is_loaded(), "predicate checks that this class is loaded");
6144 ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass();
6145 return inline_sha_implCompressMB(digestBase_obj, instklass_SHA, long_state, stub_addr, stub_name, src_start, ofs, limit);
6146 }
6147 return false;
6148 }
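// (Fallback note: returning false here means no multi-block SHA intrinsic is
//  generated for this predicate -- e.g. because the corresponding
//  UseSHA*Intrinsics flag is off -- and the call proceeds as an ordinary Java
//  call to the underlying method, presumably DigestBase.implCompressMultiBlock.)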
6149 //------------------------------inline_sha_implCompressMB-----------------------
6150 bool LibraryCallKit::inline_sha_implCompressMB(Node* digestBase_obj, ciInstanceKlass* instklass_SHA,
6151 bool long_state, address stubAddr, const char *stubName,
6152 Node* src_start, Node* ofs, Node* limit) {
6153 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_SHA);
6154 const TypeOopPtr* xtype = aklass->as_instance_type();
6219 switch (predicate) {
6220 case 0:
6221 if (UseSHA1Intrinsics) {
6222 // we want to do an instanceof comparison against the SHA class
6223 klass_SHA_name = "sun/security/provider/SHA";
6224 }
6225 break;
6226 case 1:
6227 if (UseSHA256Intrinsics) {
6228 // we want to do an instanceof comparison against the SHA2 class
6229 klass_SHA_name = "sun/security/provider/SHA2";
6230 }
6231 break;
6232 case 2:
6233 if (UseSHA512Intrinsics) {
6234 // we want to do an instanceof comparison against the SHA5 class
6235 klass_SHA_name = "sun/security/provider/SHA5";
6236 }
6237 break;
6238 default:
6239 fatal("unknown SHA intrinsic predicate: %d", predicate);
6240 }
6241
6242 ciKlass* klass_SHA = NULL;
6243 if (klass_SHA_name != NULL) {
6244 klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name));
6245 }
6246 if ((klass_SHA == NULL) || !klass_SHA->is_loaded()) {
6247 // if none of SHA/SHA2/SHA5 is loaded, we never take the intrinsic fast path
6248 Node* ctrl = control();
6249 set_control(top()); // no intrinsic path
6250 return ctrl;
6251 }
6252 ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass();
6253
6254 Node* instofSHA = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass_SHA)));
6255 Node* cmp_instof = _gvn.transform(new CmpINode(instofSHA, intcon(1)));
6256 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6257 Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
6258
6259 return instof_false; // even if it is NULL
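// (Usage note: the control returned above is the path on which the receiver is
//  not an instance of the selected SHA class; the predicated call generator is
//  expected to route that path to the regular, non-intrinsic call, while
//  control() stays on the instanceof-success path where the stub-based fast
//  path is emitted.  The not-loaded case above instead kills the intrinsic
//  path entirely by setting control to top().)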