2480 // No exceptions for unlocking
2481 // Capture slow path
2482 // disconnect fall-through projection from call and create a new one
2483 // hook up users of fall-through projection to region
2484 Node *slow_ctrl = _fallthroughproj->clone();
2485 transform_later(slow_ctrl);
2486 _igvn.hash_delete(_fallthroughproj);
2487 _fallthroughproj->disconnect_inputs(NULL, C);
2488 region->init_req(1, slow_ctrl);
2489 // region inputs are now complete
2490 transform_later(region);
2491 _igvn.replace_node(_fallthroughproj, region);
2492
2493 Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory) );
2494 mem_phi->init_req(1, memproj );
2495 mem_phi->init_req(2, mem);
2496 transform_later(mem_phi);
2497 _igvn.replace_node(_memproj_fallthrough, mem_phi);
2498 }
2499
2500 // A value type might be returned from the call but we don't know its
2501 // type. Either we get a buffered value (and nothing needs to be done)
2502 // or one of the values being returned is the klass of the value type
2503 // and we need to allocate a value type instance of that type and
2504 // initialize it with other values being returned. In that case, we
2505 // first try a fast path allocation and initialize the value with the
2506 // value klass's pack handler or we fall back to a runtime call.
2507 void PhaseMacroExpand::expand_mh_intrinsic_return(CallStaticJavaNode* call) {
2508 Node* ret = call->proj_out(TypeFunc::Parms);
2509 if (ret == NULL) {
2510 return;
2511 }
2512 // TODO fix this with the calling convention changes
2513 //assert(ret->bottom_type()->is_valuetypeptr()->is__Value(), "unexpected return type from MH intrinsic");
2514 const TypeFunc* tf = call->_tf;
2515 const TypeTuple* domain = OptoRuntime::store_value_type_fields_Type()->domain_cc();
2516 const TypeFunc* new_tf = TypeFunc::make(tf->domain_sig(), tf->domain_cc(), tf->range_sig(), domain);
2517 call->_tf = new_tf;
2518 // Make sure the change of type is applied before projections are
2519 // processed by igvn
2520 _igvn.set_type(call, call->Value(&_igvn));
2521 _igvn.set_type(ret, ret->Value(&_igvn));
2522
2523 // Before any new projection is added:
2524 CallProjections* projs = call->extract_projections(true, true);
2525
2526 Node* ctl = new Node(1);
2527 Node* mem = new Node(1);
2528 Node* io = new Node(1);
2529 Node* ex_ctl = new Node(1);
2530 Node* ex_mem = new Node(1);
2531 Node* ex_io = new Node(1);
2532 Node* res = new Node(1);
2533
2534 Node* cast = transform_later(new CastP2XNode(ctl, res));
2535 Node* mask = MakeConX(0x1);
2536 Node* masked = transform_later(new AndXNode(cast, mask));
2537 Node* cmp = transform_later(new CmpXNode(masked, mask));
2538 Node* bol = transform_later(new BoolNode(cmp, BoolTest::eq));
2539 IfNode* allocation_iff = new IfNode(ctl, bol, PROB_MAX, COUNT_UNKNOWN);
2540 transform_later(allocation_iff);
2541 Node* allocation_ctl = transform_later(new IfTrueNode(allocation_iff));
2542 Node* no_allocation_ctl = transform_later(new IfFalseNode(allocation_iff));
2543
2544 Node* no_allocation_res = transform_later(new CheckCastPPNode(no_allocation_ctl, res, TypeInstPtr::NOTNULL));
2545
2546 Node* mask2 = MakeConX(-2);
2547 Node* masked2 = transform_later(new AndXNode(cast, mask2));
2548 Node* rawklassptr = transform_later(new CastX2PNode(masked2));
2549 Node* klass_node = transform_later(new CheckCastPPNode(allocation_ctl, rawklassptr, TypeKlassPtr::OBJECT_OR_NULL));
2550
2551 Node* slowpath_bol = NULL;
2552 Node* top_adr = NULL;
2553 Node* old_top = NULL;
2554 Node* new_top = NULL;
2555 if (UseTLAB) {
2556 Node* end_adr = NULL;
2557 set_eden_pointers(top_adr, end_adr);
2558 Node* end = make_load(ctl, mem, end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
2559 old_top = new LoadPNode(ctl, mem, top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered);
2560 transform_later(old_top);
2561 Node* layout_val = make_load(NULL, mem, klass_node, in_bytes(Klass::layout_helper_offset()), TypeInt::INT, T_INT);
2562 Node* size_in_bytes = ConvI2X(layout_val);
2563 new_top = new AddPNode(top(), old_top, size_in_bytes);
2564 transform_later(new_top);
2605 ex_mem_phi->init_req(1, slow_mem);
2606 ex_io_phi->init_req(1, slow_io);
2607 ex_r->init_req(2, ex_ctl);
2608 ex_mem_phi->init_req(2, ex_mem);
2609 ex_io_phi->init_req(2, ex_io);
2610
2611 transform_later(ex_r);
2612 transform_later(ex_mem_phi);
2613 transform_later(ex_io_phi);
2614
2615 Node* slowpath_false = new IfFalseNode(slowpath_iff);
2616 transform_later(slowpath_false);
2617 Node* rawmem = new StorePNode(slowpath_false, mem, top_adr, TypeRawPtr::BOTTOM, new_top, MemNode::unordered);
2618 transform_later(rawmem);
2619 Node* mark_node = makecon(TypeRawPtr::make((address)markOopDesc::always_locked_prototype()));
2620 rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
2621 rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
2622 if (UseCompressedClassPointers) {
2623 rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_gap_offset_in_bytes(), intcon(0), T_INT);
2624 }
2625 Node* pack_handler = make_load(slowpath_false, rawmem, klass_node, in_bytes(ValueKlass::pack_handler_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
2626
2627 CallLeafNoFPNode* handler_call = new CallLeafNoFPNode(OptoRuntime::pack_value_type_Type(),
2628 NULL,
2629 "pack handler",
2630 TypeRawPtr::BOTTOM);
2631 handler_call->init_req(TypeFunc::Control, slowpath_false);
2632 handler_call->init_req(TypeFunc::Memory, rawmem);
2633 handler_call->init_req(TypeFunc::I_O, top());
2634 handler_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
2635 handler_call->init_req(TypeFunc::ReturnAdr, top());
2636 handler_call->init_req(TypeFunc::Parms, pack_handler);
2637 handler_call->init_req(TypeFunc::Parms+1, old_top);
2638
2639 // We don't know how many values are returned. This assumes the
2640 // worst case, that all available registers are used.
2641 for (uint i = TypeFunc::Parms+1; i < domain->cnt(); i++) {
2642 if (domain->field_at(i) == Type::HALF) {
2643 slow_call->init_req(i, top());
2644 handler_call->init_req(i+1, top());
2645 continue;
2648 slow_call->init_req(i, proj);
2649 handler_call->init_req(i+1, proj);
2650 }
2651
2652 // We can safepoint at that new call
2653 copy_call_debug_info(call, slow_call);
2654 transform_later(slow_call);
2655 transform_later(handler_call);
2656
2657 Node* handler_ctl = transform_later(new ProjNode(handler_call, TypeFunc::Control));
2658 rawmem = transform_later(new ProjNode(handler_call, TypeFunc::Memory));
2659 Node* slowpath_false_res = transform_later(new ProjNode(handler_call, TypeFunc::Parms));
2660
2661 MergeMemNode* slowpath_false_mem = MergeMemNode::make(mem);
2662 slowpath_false_mem->set_memory_at(Compile::AliasIdxRaw, rawmem);
2663 transform_later(slowpath_false_mem);
2664
2665 Node* r = new RegionNode(4);
2666 Node* mem_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
2667 Node* io_phi = new PhiNode(r, Type::ABIO);
2668 Node* res_phi = new PhiNode(r, ret->bottom_type());
2669
2670 r->init_req(1, no_allocation_ctl);
2671 mem_phi->init_req(1, mem);
2672 io_phi->init_req(1, io);
2673 res_phi->init_req(1, no_allocation_res);
2674 r->init_req(2, slow_norm);
2675 mem_phi->init_req(2, slow_mem);
2676 io_phi->init_req(2, slow_io);
2677 res_phi->init_req(2, slow_res);
2678 r->init_req(3, handler_ctl);
2679 mem_phi->init_req(3, slowpath_false_mem);
2680 io_phi->init_req(3, io);
2681 res_phi->init_req(3, slowpath_false_res);
2682
2683 transform_later(r);
2684 transform_later(mem_phi);
2685 transform_later(io_phi);
2686 transform_later(res_phi);
2687
2688 assert(projs->nb_resproj == 1, "unexpected number of results");
|
2480 // No exceptions for unlocking
2481 // Capture slow path
2482 // disconnect fall-through projection from call and create a new one
2483 // hook up users of fall-through projection to region
2484 Node *slow_ctrl = _fallthroughproj->clone();
2485 transform_later(slow_ctrl);
2486 _igvn.hash_delete(_fallthroughproj);
2487 _fallthroughproj->disconnect_inputs(NULL, C);
2488 region->init_req(1, slow_ctrl);
2489 // region inputs are now complete
2490 transform_later(region);
2491 _igvn.replace_node(_fallthroughproj, region);
2492
2493 Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory) );
2494 mem_phi->init_req(1, memproj );
2495 mem_phi->init_req(2, mem);
2496 transform_later(mem_phi);
2497 _igvn.replace_node(_memproj_fallthrough, mem_phi);
2498 }
2499
2500 // A value type might be returned from the call but we don't know its
2501 // type. Either we get a buffered value (and nothing needs to be done)
2502 // or one of the values being returned is the klass of the value type
2503 // and we need to allocate a value type instance of that type and
2504 // initialize it with other values being returned. In that case, we
2505 // first try a fast path allocation and initialize the value with the
2506 // value klass's pack handler or we fall back to a runtime call.
2507 void PhaseMacroExpand::expand_mh_intrinsic_return(CallStaticJavaNode* call) {
2508 assert(call->method()->is_method_handle_intrinsic(), "must be a method handle intrinsic call");
2509 Node* ret = call->proj_out_or_null(TypeFunc::Parms);
2510 if (ret == NULL) {
2511 return;
2512 }
2513 const TypeFunc* tf = call->_tf;
2514 const TypeTuple* domain = OptoRuntime::store_value_type_fields_Type()->domain_cc();
2515 const TypeFunc* new_tf = TypeFunc::make(tf->domain_sig(), tf->domain_cc(), tf->range_sig(), domain);
2516 call->_tf = new_tf;
2517 // Make sure the change of type is applied before projections are processed by igvn
2518 _igvn.set_type(call, call->Value(&_igvn));
2519 _igvn.set_type(ret, ret->Value(&_igvn));
2520
2521 // Before any new projection is added:
2522 CallProjections* projs = call->extract_projections(true, true);
2523
2524 Node* ctl = new Node(1);
2525 Node* mem = new Node(1);
2526 Node* io = new Node(1);
2527 Node* ex_ctl = new Node(1);
2528 Node* ex_mem = new Node(1);
2529 Node* ex_io = new Node(1);
2530 Node* res = new Node(1);
2531
2532 Node* cast = transform_later(new CastP2XNode(ctl, res));
2533 Node* mask = MakeConX(0x1);
2534 Node* masked = transform_later(new AndXNode(cast, mask));
2535 Node* cmp = transform_later(new CmpXNode(masked, mask));
2536 Node* bol = transform_later(new BoolNode(cmp, BoolTest::eq));
2537 IfNode* allocation_iff = new IfNode(ctl, bol, PROB_MAX, COUNT_UNKNOWN);
2538 transform_later(allocation_iff);
2539 Node* allocation_ctl = transform_later(new IfTrueNode(allocation_iff));
2540 Node* no_allocation_ctl = transform_later(new IfFalseNode(allocation_iff));
2541
2542 Node* no_allocation_res = transform_later(new CheckCastPPNode(no_allocation_ctl, res, TypeInstPtr::BOTTOM));
2543
2544 Node* mask2 = MakeConX(-2);
2545 Node* masked2 = transform_later(new AndXNode(cast, mask2));
2546 Node* rawklassptr = transform_later(new CastX2PNode(masked2));
2547 Node* klass_node = transform_later(new CheckCastPPNode(allocation_ctl, rawklassptr, TypeKlassPtr::OBJECT_OR_NULL));
2548
2549 Node* slowpath_bol = NULL;
2550 Node* top_adr = NULL;
2551 Node* old_top = NULL;
2552 Node* new_top = NULL;
2553 if (UseTLAB) {
2554 Node* end_adr = NULL;
2555 set_eden_pointers(top_adr, end_adr);
2556 Node* end = make_load(ctl, mem, end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
2557 old_top = new LoadPNode(ctl, mem, top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered);
2558 transform_later(old_top);
2559 Node* layout_val = make_load(NULL, mem, klass_node, in_bytes(Klass::layout_helper_offset()), TypeInt::INT, T_INT);
2560 Node* size_in_bytes = ConvI2X(layout_val);
2561 new_top = new AddPNode(top(), old_top, size_in_bytes);
2562 transform_later(new_top);
2603 ex_mem_phi->init_req(1, slow_mem);
2604 ex_io_phi->init_req(1, slow_io);
2605 ex_r->init_req(2, ex_ctl);
2606 ex_mem_phi->init_req(2, ex_mem);
2607 ex_io_phi->init_req(2, ex_io);
2608
2609 transform_later(ex_r);
2610 transform_later(ex_mem_phi);
2611 transform_later(ex_io_phi);
2612
2613 Node* slowpath_false = new IfFalseNode(slowpath_iff);
2614 transform_later(slowpath_false);
2615 Node* rawmem = new StorePNode(slowpath_false, mem, top_adr, TypeRawPtr::BOTTOM, new_top, MemNode::unordered);
2616 transform_later(rawmem);
2617 Node* mark_node = makecon(TypeRawPtr::make((address)markOopDesc::always_locked_prototype()));
2618 rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
2619 rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
2620 if (UseCompressedClassPointers) {
2621 rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_gap_offset_in_bytes(), intcon(0), T_INT);
2622 }
2623 Node* fixed_block = make_load(slowpath_false, rawmem, klass_node, in_bytes(InstanceKlass::adr_valueklass_fixed_block_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
2624 Node* pack_handler = make_load(slowpath_false, rawmem, fixed_block, in_bytes(ValueKlass::pack_handler_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
2625
2626 CallLeafNoFPNode* handler_call = new CallLeafNoFPNode(OptoRuntime::pack_value_type_Type(),
2627 NULL,
2628 "pack handler",
2629 TypeRawPtr::BOTTOM);
2630 handler_call->init_req(TypeFunc::Control, slowpath_false);
2631 handler_call->init_req(TypeFunc::Memory, rawmem);
2632 handler_call->init_req(TypeFunc::I_O, top());
2633 handler_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
2634 handler_call->init_req(TypeFunc::ReturnAdr, top());
2635 handler_call->init_req(TypeFunc::Parms, pack_handler);
2636 handler_call->init_req(TypeFunc::Parms+1, old_top);
2637
2638 // We don't know how many values are returned. This assumes the
2639 // worst case, that all available registers are used.
2640 for (uint i = TypeFunc::Parms+1; i < domain->cnt(); i++) {
2641 if (domain->field_at(i) == Type::HALF) {
2642 slow_call->init_req(i, top());
2643 handler_call->init_req(i+1, top());
2644 continue;
2647 slow_call->init_req(i, proj);
2648 handler_call->init_req(i+1, proj);
2649 }
2650
2651 // We can safepoint at that new call
2652 copy_call_debug_info(call, slow_call);
2653 transform_later(slow_call);
2654 transform_later(handler_call);
2655
2656 Node* handler_ctl = transform_later(new ProjNode(handler_call, TypeFunc::Control));
2657 rawmem = transform_later(new ProjNode(handler_call, TypeFunc::Memory));
2658 Node* slowpath_false_res = transform_later(new ProjNode(handler_call, TypeFunc::Parms));
2659
2660 MergeMemNode* slowpath_false_mem = MergeMemNode::make(mem);
2661 slowpath_false_mem->set_memory_at(Compile::AliasIdxRaw, rawmem);
2662 transform_later(slowpath_false_mem);
2663
2664 Node* r = new RegionNode(4);
2665 Node* mem_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
2666 Node* io_phi = new PhiNode(r, Type::ABIO);
2667 Node* res_phi = new PhiNode(r, TypeInstPtr::BOTTOM);
2668
2669 r->init_req(1, no_allocation_ctl);
2670 mem_phi->init_req(1, mem);
2671 io_phi->init_req(1, io);
2672 res_phi->init_req(1, no_allocation_res);
2673 r->init_req(2, slow_norm);
2674 mem_phi->init_req(2, slow_mem);
2675 io_phi->init_req(2, slow_io);
2676 res_phi->init_req(2, slow_res);
2677 r->init_req(3, handler_ctl);
2678 mem_phi->init_req(3, slowpath_false_mem);
2679 io_phi->init_req(3, io);
2680 res_phi->init_req(3, slowpath_false_res);
2681
2682 transform_later(r);
2683 transform_later(mem_phi);
2684 transform_later(io_phi);
2685 transform_later(res_phi);
2686
2687 assert(projs->nb_resproj == 1, "unexpected number of results");
|