src/hotspot/share/opto/matcher.cpp (old version)

  71   _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  72   _must_clone(must_clone),
  73   _shared_nodes(C->comp_arena()),
  74 #ifdef ASSERT
  75   _old2new_map(C->comp_arena()),
  76   _new2old_map(C->comp_arena()),
  77 #endif
  78   _allocation_started(false),
  79   _ruleName(ruleName),
  80   _register_save_policy(register_save_policy),
  81   _c_reg_save_policy(c_reg_save_policy),
  82   _register_save_type(register_save_type) {
  83   C->set_matcher(this);
  84 
  85   idealreg2spillmask  [Op_RegI] = NULL;
  86   idealreg2spillmask  [Op_RegN] = NULL;
  87   idealreg2spillmask  [Op_RegL] = NULL;
  88   idealreg2spillmask  [Op_RegF] = NULL;
  89   idealreg2spillmask  [Op_RegD] = NULL;
  90   idealreg2spillmask  [Op_RegP] = NULL;

  91   idealreg2spillmask  [Op_VecS] = NULL;
  92   idealreg2spillmask  [Op_VecD] = NULL;
  93   idealreg2spillmask  [Op_VecX] = NULL;
  94   idealreg2spillmask  [Op_VecY] = NULL;
  95   idealreg2spillmask  [Op_VecZ] = NULL;
  96   idealreg2spillmask  [Op_RegFlags] = NULL;
  97 
  98   idealreg2debugmask  [Op_RegI] = NULL;
  99   idealreg2debugmask  [Op_RegN] = NULL;
 100   idealreg2debugmask  [Op_RegL] = NULL;
 101   idealreg2debugmask  [Op_RegF] = NULL;
 102   idealreg2debugmask  [Op_RegD] = NULL;
 103   idealreg2debugmask  [Op_RegP] = NULL;

 104   idealreg2debugmask  [Op_VecS] = NULL;
 105   idealreg2debugmask  [Op_VecD] = NULL;
 106   idealreg2debugmask  [Op_VecX] = NULL;
 107   idealreg2debugmask  [Op_VecY] = NULL;
 108   idealreg2debugmask  [Op_VecZ] = NULL;
 109   idealreg2debugmask  [Op_RegFlags] = NULL;
 110 
 111   idealreg2mhdebugmask[Op_RegI] = NULL;
 112   idealreg2mhdebugmask[Op_RegN] = NULL;
 113   idealreg2mhdebugmask[Op_RegL] = NULL;
 114   idealreg2mhdebugmask[Op_RegF] = NULL;
 115   idealreg2mhdebugmask[Op_RegD] = NULL;
 116   idealreg2mhdebugmask[Op_RegP] = NULL;

 117   idealreg2mhdebugmask[Op_VecS] = NULL;
 118   idealreg2mhdebugmask[Op_VecD] = NULL;
 119   idealreg2mhdebugmask[Op_VecX] = NULL;
 120   idealreg2mhdebugmask[Op_VecY] = NULL;
 121   idealreg2mhdebugmask[Op_VecZ] = NULL;
 122   idealreg2mhdebugmask[Op_RegFlags] = NULL;
 123 
 124   debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
 125 }
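
The three mask tables zeroed above are populated later on this page (by init_first_stack_mask() and init_spill_mask()) and are then consulted through the Matcher. A minimal sketch of the lookup pattern, assuming an already-matched compile; the variable names are illustrative, not quoted from the allocator:

    // Hedged sketch: the tables are indexed by ideal register opcode.
    Matcher* m = C->matcher();
    const RegMask* spill_rm = m->idealreg2spillmask[Op_RegI]; // class regs + stack slots
    const RegMask* debug_rm = m->idealreg2debugmask[Op_RegI]; // spill set less caller-save regs
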
 126 
 127 //------------------------------warp_incoming_stk_arg------------------------
 128 // This warps a VMReg into an OptoReg::Name
 129 OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
 130   OptoReg::Name warped;
 131   if( reg->is_stack() ) {  // Stack slot argument?
 132     warped = OptoReg::add(_old_SP, reg->reg2stack() );
 133     warped = OptoReg::add(warped, C->out_preserve_stack_slots());
 134     if( warped >= _in_arg_limit )
 135       _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
 136     if (!RegMask::can_represent_arg(warped)) {

 410 //------------------------------Fixup_Save_On_Entry----------------------------
 411 // The stated purpose of this routine is to take care of save-on-entry
 412 // registers.  However, the overall goal of the Match phase is to convert the ideal
 413 // graph into machine-specific instructions, which have RegMasks to guide allocation.
 414 // So what this procedure really does is put a valid RegMask on each input
 415 // to the machine-specific variations of all Return, TailCall and Halt
 416   instructions.  It also adds edges to define the save-on-entry values (and of
 417 // course gives them a mask).
 418 
 419 static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
 420   RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
 421   // Do all the pre-defined register masks
 422   rms[TypeFunc::Control  ] = RegMask::Empty;
 423   rms[TypeFunc::I_O      ] = RegMask::Empty;
 424   rms[TypeFunc::Memory   ] = RegMask::Empty;
 425   rms[TypeFunc::ReturnAdr] = ret_adr;
 426   rms[TypeFunc::FramePtr ] = fp;
 427   return rms;
 428 }
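
A hypothetical caller (Fixup_Save_On_Entry itself is elided from this hunk) would size the array by the node's input count and fill the remaining slots afterwards. A sketch under that assumption; ret_adr_mask and fp_mask stand in for whatever masks the platform defines:

    // Hedged sketch: one RegMask per input edge of a Return-like node.
    uint req = ret->req();                              // number of inputs
    RegMask* in_rms = init_input_masks(req, ret_adr_mask, fp_mask);
    // Slots from TypeFunc::Parms onward are not initialized here; the
    // save-on-entry register masks are assigned to them later.
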
 429 
 430 #define NOF_STACK_MASKS (3*6+5)   // 3 mask tables x 6 scalar reg classes + 5 vector spill masks
 431 
 432 // Create the initial stack mask used by values spilling to the stack.
 433 // Disallow any debug info in outgoing argument areas by setting the
 434 // initial mask accordingly.
 435 void Matcher::init_first_stack_mask() {
 436 
 437   // Allocate storage for spill masks as masks for the appropriate load type.
 438   RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * NOF_STACK_MASKS);
 439 
 440   // Initialize empty placeholder masks into the newly allocated arena
 441   for (int i = 0; i < NOF_STACK_MASKS; i++) {
 442     new (rms + i) RegMask();
 443   }
 444 
 445   idealreg2spillmask  [Op_RegN] = &rms[0];
 446   idealreg2spillmask  [Op_RegI] = &rms[1];
 447   idealreg2spillmask  [Op_RegL] = &rms[2];
 448   idealreg2spillmask  [Op_RegF] = &rms[3];
 449   idealreg2spillmask  [Op_RegD] = &rms[4];
 450   idealreg2spillmask  [Op_RegP] = &rms[5];
 451 
 452   idealreg2debugmask  [Op_RegN] = &rms[6];
 453   idealreg2debugmask  [Op_RegI] = &rms[7];
 454   idealreg2debugmask  [Op_RegL] = &rms[8];
 455   idealreg2debugmask  [Op_RegF] = &rms[9];
 456   idealreg2debugmask  [Op_RegD] = &rms[10];
 457   idealreg2debugmask  [Op_RegP] = &rms[11];
 458 
 459   idealreg2mhdebugmask[Op_RegN] = &rms[12];
 460   idealreg2mhdebugmask[Op_RegI] = &rms[13];
 461   idealreg2mhdebugmask[Op_RegL] = &rms[14];
 462   idealreg2mhdebugmask[Op_RegF] = &rms[15];
 463   idealreg2mhdebugmask[Op_RegD] = &rms[16];
 464   idealreg2mhdebugmask[Op_RegP] = &rms[17];
 465 
 466   idealreg2spillmask  [Op_VecS] = &rms[18];
 467   idealreg2spillmask  [Op_VecD] = &rms[19];
 468   idealreg2spillmask  [Op_VecX] = &rms[20];
 469   idealreg2spillmask  [Op_VecY] = &rms[21];
 470   idealreg2spillmask  [Op_VecZ] = &rms[22];

 471 
 472   OptoReg::Name i;
 473 
 474   // At first, start with the empty mask
 475   C->FIRST_STACK_mask().Clear();
 476 
 477   // Add in the incoming argument area
 478   OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
 479   for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
 480     C->FIRST_STACK_mask().Insert(i);
 481   }
 482   // Add in all bits past the outgoing argument area
 483   guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
 484             "must be able to represent all call arguments in reg mask");
 485   OptoReg::Name init = _out_arg_limit;
 486   for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
 487     C->FIRST_STACK_mask().Insert(i);
 488   }
 489   // Finally, set the "infinite stack" bit.
 490   C->FIRST_STACK_mask().set_AllStack();
 491 
 492   // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
 493   RegMask aligned_stack_mask = C->FIRST_STACK_mask();
 494   // Keep spill masks aligned.
 495   aligned_stack_mask.clear_to_pairs();
 496   assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");

 497 
 498   *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
 499 #ifdef _LP64
 500   *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
 501    idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
 502    idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
 503 #else
 504    idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
 505 #endif
 506   *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
 507    idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
 508   *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
 509    idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
 510   *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
 511    idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
 512   *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
 513    idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);
 514 
 515   if (Matcher::vector_size_supported(T_BYTE,4)) {
 516     *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];

 547       aligned_stack_mask.Remove(in);
 548       in = OptoReg::add(in, -1);
 549     }
 550      aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
 551      assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
 552     *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
 553      idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
 554   }
 555   if (Matcher::vector_size_supported(T_FLOAT,16)) {
 556     // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
 557     OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
 558     for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
 559       aligned_stack_mask.Remove(in);
 560       in = OptoReg::add(in, -1);
 561     }
 562      aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
 563      assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
 564     *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
 565      idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
 566   }
 567    if (UseFPUForSpilling) {
 568      // This mask logic assumes that the spill operations are
 569      // symmetric and that the registers involved are the same size.
 570      // On sparc, for instance, we may have to use 64-bit moves that will
 571      // kill 2 registers when used with F0-F31.
 572      idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
 573      idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
 574 #ifdef _LP64
 575      idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
 576      idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
 577      idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
 578      idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
 579 #else
 580      idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
 581 #ifdef ARM
 582      // ARM has support for moving 64bit values between a pair of
 583      // integer registers and a double register
 584      idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
 585      idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
 586 #endif
 587 #endif
 588    }
 589 
 590   // Make up debug masks.  Any spill slot plus callee-save (SOE) registers.
 591   // Caller-save (SOC, AS) registers are assumed to be trashable by the various
 592   // inline-cache fixup routines.
 593   *idealreg2debugmask  [Op_RegN] = *idealreg2spillmask[Op_RegN];
 594   *idealreg2debugmask  [Op_RegI] = *idealreg2spillmask[Op_RegI];
 595   *idealreg2debugmask  [Op_RegL] = *idealreg2spillmask[Op_RegL];
 596   *idealreg2debugmask  [Op_RegF] = *idealreg2spillmask[Op_RegF];
 597   *idealreg2debugmask  [Op_RegD] = *idealreg2spillmask[Op_RegD];
 598   *idealreg2debugmask  [Op_RegP] = *idealreg2spillmask[Op_RegP];
 599 
 600   *idealreg2mhdebugmask[Op_RegN] = *idealreg2spillmask[Op_RegN];
 601   *idealreg2mhdebugmask[Op_RegI] = *idealreg2spillmask[Op_RegI];
 602   *idealreg2mhdebugmask[Op_RegL] = *idealreg2spillmask[Op_RegL];
 603   *idealreg2mhdebugmask[Op_RegF] = *idealreg2spillmask[Op_RegF];
 604   *idealreg2mhdebugmask[Op_RegD] = *idealreg2spillmask[Op_RegD];
 605   *idealreg2mhdebugmask[Op_RegP] = *idealreg2spillmask[Op_RegP];
 606 
 607   // Prevent stub compilations from attempting to reference
 608   // callee-saved (SOE) registers from debug info

 861   // Also exclude the register used to save the SP for MethodHandle
 862   // invokes from the corresponding MH debug masks
 863   const RegMask sp_save_mask = method_handle_invoke_SP_save_mask();
 864   mh_caller_save_regmask.OR(sp_save_mask);
 865   mh_caller_save_regmask_exclude_soe.OR(sp_save_mask);
 866 
 867   // Grab the Frame Pointer
 868   Node *fp  = ret->in(TypeFunc::FramePtr);
 869   // Share frame pointer while making spill ops
 870   set_shared(fp);
 871 
 872 // Get the ADLC notion of the right regmask, for each basic type.
 873 #ifdef _LP64
 874   idealreg2regmask[Op_RegN] = regmask_for_ideal_register(Op_RegN, ret);
 875 #endif
 876   idealreg2regmask[Op_RegI] = regmask_for_ideal_register(Op_RegI, ret);
 877   idealreg2regmask[Op_RegP] = regmask_for_ideal_register(Op_RegP, ret);
 878   idealreg2regmask[Op_RegF] = regmask_for_ideal_register(Op_RegF, ret);
 879   idealreg2regmask[Op_RegD] = regmask_for_ideal_register(Op_RegD, ret);
 880   idealreg2regmask[Op_RegL] = regmask_for_ideal_register(Op_RegL, ret);

 881   idealreg2regmask[Op_VecS] = regmask_for_ideal_register(Op_VecS, ret);
 882   idealreg2regmask[Op_VecD] = regmask_for_ideal_register(Op_VecD, ret);
 883   idealreg2regmask[Op_VecX] = regmask_for_ideal_register(Op_VecX, ret);
 884   idealreg2regmask[Op_VecY] = regmask_for_ideal_register(Op_VecY, ret);
 885   idealreg2regmask[Op_VecZ] = regmask_for_ideal_register(Op_VecZ, ret);
 886 }
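
Ordering note: init_first_stack_mask() (shown earlier on this page) starts each spill mask from an idealreg2regmask entry, so the probing above has to run first. A condensed view of the handoff, restating lines that appear verbatim elsewhere on this page:

    // In init_spill_mask(): derive a mask by matching a synthetic load.
    idealreg2regmask[Op_RegI] = regmask_for_ideal_register(Op_RegI, ret);
    // Later, in init_first_stack_mask(): copy it and widen with stack slots.
    *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
    idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
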
 887 
 888 #ifdef ASSERT
 889 static void match_alias_type(Compile* C, Node* n, Node* m) {
 890   if (!VerifyAliases)  return;  // do not go looking for trouble by default
 891   const TypePtr* nat = n->adr_type();
 892   const TypePtr* mat = m->adr_type();
 893   int nidx = C->get_alias_index(nat);
 894   int midx = C->get_alias_index(mat);
 895   // Detune the assert for cases like (AndI 0xFF (LoadB p)).
 896   if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
 897     for (uint i = 1; i < n->req(); i++) {
 898       Node* n1 = n->in(i);
 899       const TypePtr* n1at = n1->adr_type();
 900       if (n1at != NULL) {

1546         (input_mem == NodeSentinel) ) {
1547       // Print when we exclude matching due to different memory states at input-loads
1548       if (PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
1549           && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem)) {
1550         tty->print_cr("invalid input_mem");
1551       }
1552       // Switch to a register-only opcode; this value must be in a register
1553       // and cannot be subsumed as part of a larger instruction.
1554       s->DFA( m->ideal_reg(), m );
1555 
1556     } else {
1557       // If match tree has no control and we do, adopt it for entire tree
1558       if( control == NULL && m->in(0) != NULL && m->req() > 1 )
1559         control = m->in(0);         // Pick up control
1560       // Else match as a normal part of the match tree.
1561       control = Label_Root(m, s, control, mem);
1562       if (C->failing()) return NULL;
1563     }
1564   }
1565 
1566 
1567   // Call DFA to match this node, and return
1568   svec->DFA( n->Opcode(), n );
1569 
1570 #ifdef ASSERT
1571   uint x;
1572   for( x = 0; x < _LAST_MACH_OPER; x++ )
1573     if( svec->valid(x) )
1574       break;
1575 
1576   if (x >= _LAST_MACH_OPER) {
1577     n->dump();
1578     svec->dump();
1579     assert( false, "bad AD file" );
1580   }
1581 #endif
1582   return control;
1583 }
1584 
1585 
1586 // Con nodes reduced using the same rule can share their MachNode

2396   }
2397 }
2398 
2399 bool Matcher::gen_narrow_oop_implicit_null_checks() {
2400   // Advise the matcher to perform null checks on the narrow oop side.
2401   // Implicit checks are not possible on the uncompressed oop side anyway
2402   // (at least not for read accesses).
2403   // This performs significantly better (especially on Power 6).
2404   if (!os::zero_page_read_protected()) {
2405     return true;
2406   }
2407   return CompressedOops::use_implicit_null_checks() &&
2408          (narrow_oop_use_complex_address() ||
2409           CompressedOops::base() != NULL);
2410 }
2411 
2412 // Compute RegMask for an ideal register.
2413 const RegMask* Matcher::regmask_for_ideal_register(uint ideal_reg, Node* ret) {
2414   const Type* t = Type::mreg2type[ideal_reg];
2415   if (t == NULL) {
2416     assert(ideal_reg >= Op_VecS && ideal_reg <= Op_VecZ, "not a vector: %d", ideal_reg);
2417     return NULL; // not supported
2418   }
2419   Node* fp  = ret->in(TypeFunc::FramePtr);
2420   Node* mem = ret->in(TypeFunc::Memory);
2421   const TypePtr* atp = TypePtr::BOTTOM;
2422   MemNode::MemOrd mo = MemNode::unordered;
2423 
2424   Node* spill;
2425   switch (ideal_reg) {
2426     case Op_RegN: spill = new LoadNNode(NULL, mem, fp, atp, t->is_narrowoop(), mo); break;
2427     case Op_RegI: spill = new LoadINode(NULL, mem, fp, atp, t->is_int(),       mo); break;
2428     case Op_RegP: spill = new LoadPNode(NULL, mem, fp, atp, t->is_ptr(),       mo); break;
2429     case Op_RegF: spill = new LoadFNode(NULL, mem, fp, atp, t,                 mo); break;
2430     case Op_RegD: spill = new LoadDNode(NULL, mem, fp, atp, t,                 mo); break;
2431     case Op_RegL: spill = new LoadLNode(NULL, mem, fp, atp, t->is_long(),      mo); break;
2432 

2433     case Op_VecS: // fall-through
2434     case Op_VecD: // fall-through
2435     case Op_VecX: // fall-through
2436     case Op_VecY: // fall-through
2437     case Op_VecZ: spill = new LoadVectorNode(NULL, mem, fp, atp, t->is_vect()); break;
2438 
2439     default: ShouldNotReachHere();
2440   }
2441   MachNode* mspill = match_tree(spill);
2442   assert(mspill != NULL, "matching failed: %d", ideal_reg);
2443   // Handle generic vector operand case
2444   if (Matcher::supports_generic_vector_operands && t->isa_vect()) {
2445     specialize_mach_node(mspill);
2446   }
2447   return &mspill->out_RegMask();
2448 }
2449 
2450 // Process Mach IR right after selection phase is over.
2451 void Matcher::do_postselect_cleanup() {
2452   if (supports_generic_vector_operands) {

src/hotspot/share/opto/matcher.cpp (new version)

  71   _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  72   _must_clone(must_clone),
  73   _shared_nodes(C->comp_arena()),
  74 #ifdef ASSERT
  75   _old2new_map(C->comp_arena()),
  76   _new2old_map(C->comp_arena()),
  77 #endif
  78   _allocation_started(false),
  79   _ruleName(ruleName),
  80   _register_save_policy(register_save_policy),
  81   _c_reg_save_policy(c_reg_save_policy),
  82   _register_save_type(register_save_type) {
  83   C->set_matcher(this);
  84 
  85   idealreg2spillmask  [Op_RegI] = NULL;
  86   idealreg2spillmask  [Op_RegN] = NULL;
  87   idealreg2spillmask  [Op_RegL] = NULL;
  88   idealreg2spillmask  [Op_RegF] = NULL;
  89   idealreg2spillmask  [Op_RegD] = NULL;
  90   idealreg2spillmask  [Op_RegP] = NULL;
  91   idealreg2spillmask  [Op_VecA] = NULL;
  92   idealreg2spillmask  [Op_VecS] = NULL;
  93   idealreg2spillmask  [Op_VecD] = NULL;
  94   idealreg2spillmask  [Op_VecX] = NULL;
  95   idealreg2spillmask  [Op_VecY] = NULL;
  96   idealreg2spillmask  [Op_VecZ] = NULL;
  97   idealreg2spillmask  [Op_RegFlags] = NULL;
  98 
  99   idealreg2debugmask  [Op_RegI] = NULL;
 100   idealreg2debugmask  [Op_RegN] = NULL;
 101   idealreg2debugmask  [Op_RegL] = NULL;
 102   idealreg2debugmask  [Op_RegF] = NULL;
 103   idealreg2debugmask  [Op_RegD] = NULL;
 104   idealreg2debugmask  [Op_RegP] = NULL;
 105   idealreg2debugmask  [Op_VecA] = NULL;
 106   idealreg2debugmask  [Op_VecS] = NULL;
 107   idealreg2debugmask  [Op_VecD] = NULL;
 108   idealreg2debugmask  [Op_VecX] = NULL;
 109   idealreg2debugmask  [Op_VecY] = NULL;
 110   idealreg2debugmask  [Op_VecZ] = NULL;
 111   idealreg2debugmask  [Op_RegFlags] = NULL;
 112 
 113   idealreg2mhdebugmask[Op_RegI] = NULL;
 114   idealreg2mhdebugmask[Op_RegN] = NULL;
 115   idealreg2mhdebugmask[Op_RegL] = NULL;
 116   idealreg2mhdebugmask[Op_RegF] = NULL;
 117   idealreg2mhdebugmask[Op_RegD] = NULL;
 118   idealreg2mhdebugmask[Op_RegP] = NULL;
 119   idealreg2mhdebugmask[Op_VecA] = NULL;
 120   idealreg2mhdebugmask[Op_VecS] = NULL;
 121   idealreg2mhdebugmask[Op_VecD] = NULL;
 122   idealreg2mhdebugmask[Op_VecX] = NULL;
 123   idealreg2mhdebugmask[Op_VecY] = NULL;
 124   idealreg2mhdebugmask[Op_VecZ] = NULL;
 125   idealreg2mhdebugmask[Op_RegFlags] = NULL;
 126 
 127   debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
 128 }
 129 
 130 //------------------------------warp_incoming_stk_arg------------------------
 131 // This warps a VMReg into an OptoReg::Name
 132 OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
 133   OptoReg::Name warped;
 134   if( reg->is_stack() ) {  // Stack slot argument?
 135     warped = OptoReg::add(_old_SP, reg->reg2stack() );
 136     warped = OptoReg::add(warped, C->out_preserve_stack_slots());
 137     if( warped >= _in_arg_limit )
 138       _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
 139     if (!RegMask::can_represent_arg(warped)) {

 413 //------------------------------Fixup_Save_On_Entry----------------------------
 414 // The stated purpose of this routine is to take care of save-on-entry
 415 // registers.  However, the overall goal of the Match phase is to convert the ideal
 416 // graph into machine-specific instructions, which have RegMasks to guide allocation.
 417 // So what this procedure really does is put a valid RegMask on each input
 418 // to the machine-specific variations of all Return, TailCall and Halt
 419   instructions.  It also adds edges to define the save-on-entry values (and of
 420 // course gives them a mask).
 421 
 422 static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
 423   RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
 424   // Do all the pre-defined register masks
 425   rms[TypeFunc::Control  ] = RegMask::Empty;
 426   rms[TypeFunc::I_O      ] = RegMask::Empty;
 427   rms[TypeFunc::Memory   ] = RegMask::Empty;
 428   rms[TypeFunc::ReturnAdr] = ret_adr;
 429   rms[TypeFunc::FramePtr ] = fp;
 430   return rms;
 431 }
 432 
 433 #define NOF_STACK_MASKS (3*6+6)   // 3 mask tables x 6 scalar reg classes + 6 vector spill masks (VecA..VecZ)
 434 
 435 // Create the initial stack mask used by values spilling to the stack.
 436 // Disallow any debug info in outgoing argument areas by setting the
 437 // initial mask accordingly.
 438 void Matcher::init_first_stack_mask() {
 439 
 440   // Allocate storage for spill masks as masks for the appropriate load type.
 441   RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * NOF_STACK_MASKS);
 442 
 443   // Initialize empty placeholder masks into the newly allocated arena
 444   for (int i = 0; i < NOF_STACK_MASKS; i++) {
 445     new (rms + i) RegMask();
 446   }
 447 
 448   idealreg2spillmask  [Op_RegN] = &rms[0];
 449   idealreg2spillmask  [Op_RegI] = &rms[1];
 450   idealreg2spillmask  [Op_RegL] = &rms[2];
 451   idealreg2spillmask  [Op_RegF] = &rms[3];
 452   idealreg2spillmask  [Op_RegD] = &rms[4];
 453   idealreg2spillmask  [Op_RegP] = &rms[5];
 454 
 455   idealreg2debugmask  [Op_RegN] = &rms[6];
 456   idealreg2debugmask  [Op_RegI] = &rms[7];
 457   idealreg2debugmask  [Op_RegL] = &rms[8];
 458   idealreg2debugmask  [Op_RegF] = &rms[9];
 459   idealreg2debugmask  [Op_RegD] = &rms[10];
 460   idealreg2debugmask  [Op_RegP] = &rms[11];
 461 
 462   idealreg2mhdebugmask[Op_RegN] = &rms[12];
 463   idealreg2mhdebugmask[Op_RegI] = &rms[13];
 464   idealreg2mhdebugmask[Op_RegL] = &rms[14];
 465   idealreg2mhdebugmask[Op_RegF] = &rms[15];
 466   idealreg2mhdebugmask[Op_RegD] = &rms[16];
 467   idealreg2mhdebugmask[Op_RegP] = &rms[17];
 468 
 469   idealreg2spillmask  [Op_VecA] = &rms[18];
 470   idealreg2spillmask  [Op_VecS] = &rms[19];
 471   idealreg2spillmask  [Op_VecD] = &rms[20];
 472   idealreg2spillmask  [Op_VecX] = &rms[21];
 473   idealreg2spillmask  [Op_VecY] = &rms[22];
 474   idealreg2spillmask  [Op_VecZ] = &rms[23];
 475 
 476   OptoReg::Name i;
 477 
 478   // At first, start with the empty mask
 479   C->FIRST_STACK_mask().Clear();
 480 
 481   // Add in the incoming argument area
 482   OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
 483   for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
 484     C->FIRST_STACK_mask().Insert(i);
 485   }
 486   // Add in all bits past the outgoing argument area
 487   guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
 488             "must be able to represent all call arguments in reg mask");
 489   OptoReg::Name init = _out_arg_limit;
 490   for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
 491     C->FIRST_STACK_mask().Insert(i);
 492   }
 493   // Finally, set the "infinite stack" bit.
 494   C->FIRST_STACK_mask().set_AllStack();
 495 
 496   // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
 497   RegMask aligned_stack_mask = C->FIRST_STACK_mask();
 498   // Keep spill masks aligned.
 499   aligned_stack_mask.clear_to_pairs();
 500   assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
 501   RegMask scalable_stack_mask = aligned_stack_mask;
 502 
 503   *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
 504 #ifdef _LP64
 505   *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
 506    idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
 507    idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
 508 #else
 509    idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
 510 #endif
 511   *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
 512    idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
 513   *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
 514    idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
 515   *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
 516    idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
 517   *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
 518    idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);
 519 
 520   if (Matcher::vector_size_supported(T_BYTE,4)) {
 521     *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];

 552       aligned_stack_mask.Remove(in);
 553       in = OptoReg::add(in, -1);
 554     }
 555      aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
 556      assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
 557     *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
 558      idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
 559   }
 560   if (Matcher::vector_size_supported(T_FLOAT,16)) {
 561     // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
 562     OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
 563     for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
 564       aligned_stack_mask.Remove(in);
 565       in = OptoReg::add(in, -1);
 566     }
 567      aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
 568      assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
 569     *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
 570      idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
 571   }
 572 
 573   if (Matcher::supports_scalable_vector()) {
 574     int k = 1;
 575     OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
 576     // Exclude the last input arg stack slots to avoid spilling a vector register there;
 577     // otherwise vector spills could stomp over stack slots in the caller's frame.
 578     for (; (in >= init_in) && (k < scalable_vector_reg_size(T_FLOAT)); k++) {
 579       scalable_stack_mask.Remove(in);
 580       in = OptoReg::add(in, -1);
 581     }
 582 
 583     // For VecA
 584      scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA);
 585      assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
 586     *idealreg2spillmask[Op_VecA] = *idealreg2regmask[Op_VecA];
 587      idealreg2spillmask[Op_VecA]->OR(scalable_stack_mask);
 588   } else {
 589     *idealreg2spillmask[Op_VecA] = RegMask::Empty;
 590   }
 591 
 592   if (UseFPUForSpilling) {
 593     // This mask logic assumes that the spill operations are
 594     // symmetric and that the registers involved are the same size.
 595     // On sparc, for instance, we may have to use 64-bit moves that will
 596     // kill 2 registers when used with F0-F31.
 597     idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
 598     idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
 599 #ifdef _LP64
 600     idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
 601     idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
 602     idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
 603     idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
 604 #else
 605     idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
 606 #ifdef ARM
 607     // ARM has support for moving 64bit values between a pair of
 608     // integer registers and a double register
 609     idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
 610     idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
 611 #endif
 612 #endif
 613   }
 614 
 615   // Make up debug masks.  Any spill slot plus callee-save (SOE) registers.
 616   // Caller-save (SOC, AS) registers are assumed to be trashable by the various
 617   // inline-cache fixup routines.
 618   *idealreg2debugmask  [Op_RegN] = *idealreg2spillmask[Op_RegN];
 619   *idealreg2debugmask  [Op_RegI] = *idealreg2spillmask[Op_RegI];
 620   *idealreg2debugmask  [Op_RegL] = *idealreg2spillmask[Op_RegL];
 621   *idealreg2debugmask  [Op_RegF] = *idealreg2spillmask[Op_RegF];
 622   *idealreg2debugmask  [Op_RegD] = *idealreg2spillmask[Op_RegD];
 623   *idealreg2debugmask  [Op_RegP] = *idealreg2spillmask[Op_RegP];
 624 
 625   *idealreg2mhdebugmask[Op_RegN] = *idealreg2spillmask[Op_RegN];
 626   *idealreg2mhdebugmask[Op_RegI] = *idealreg2spillmask[Op_RegI];
 627   *idealreg2mhdebugmask[Op_RegL] = *idealreg2spillmask[Op_RegL];
 628   *idealreg2mhdebugmask[Op_RegF] = *idealreg2spillmask[Op_RegF];
 629   *idealreg2mhdebugmask[Op_RegD] = *idealreg2spillmask[Op_RegD];
 630   *idealreg2mhdebugmask[Op_RegP] = *idealreg2spillmask[Op_RegP];
 631 
 632   // Prevent stub compilations from attempting to reference
 633   // callee-saved (SOE) registers from debug info

 886   // Also exclude the register used to save the SP for MethodHandle
 887   // invokes from the corresponding MH debug masks
 888   const RegMask sp_save_mask = method_handle_invoke_SP_save_mask();
 889   mh_caller_save_regmask.OR(sp_save_mask);
 890   mh_caller_save_regmask_exclude_soe.OR(sp_save_mask);
 891 
 892   // Grab the Frame Pointer
 893   Node *fp  = ret->in(TypeFunc::FramePtr);
 894   // Share frame pointer while making spill ops
 895   set_shared(fp);
 896 
 897 // Get the ADLC notion of the right regmask, for each basic type.
 898 #ifdef _LP64
 899   idealreg2regmask[Op_RegN] = regmask_for_ideal_register(Op_RegN, ret);
 900 #endif
 901   idealreg2regmask[Op_RegI] = regmask_for_ideal_register(Op_RegI, ret);
 902   idealreg2regmask[Op_RegP] = regmask_for_ideal_register(Op_RegP, ret);
 903   idealreg2regmask[Op_RegF] = regmask_for_ideal_register(Op_RegF, ret);
 904   idealreg2regmask[Op_RegD] = regmask_for_ideal_register(Op_RegD, ret);
 905   idealreg2regmask[Op_RegL] = regmask_for_ideal_register(Op_RegL, ret);
 906   idealreg2regmask[Op_VecA] = regmask_for_ideal_register(Op_VecA, ret);
 907   idealreg2regmask[Op_VecS] = regmask_for_ideal_register(Op_VecS, ret);
 908   idealreg2regmask[Op_VecD] = regmask_for_ideal_register(Op_VecD, ret);
 909   idealreg2regmask[Op_VecX] = regmask_for_ideal_register(Op_VecX, ret);
 910   idealreg2regmask[Op_VecY] = regmask_for_ideal_register(Op_VecY, ret);
 911   idealreg2regmask[Op_VecZ] = regmask_for_ideal_register(Op_VecZ, ret);
 912 }
 913 
 914 #ifdef ASSERT
 915 static void match_alias_type(Compile* C, Node* n, Node* m) {
 916   if (!VerifyAliases)  return;  // do not go looking for trouble by default
 917   const TypePtr* nat = n->adr_type();
 918   const TypePtr* mat = m->adr_type();
 919   int nidx = C->get_alias_index(nat);
 920   int midx = C->get_alias_index(mat);
 921   // Detune the assert for cases like (AndI 0xFF (LoadB p)).
 922   if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
 923     for (uint i = 1; i < n->req(); i++) {
 924       Node* n1 = n->in(i);
 925       const TypePtr* n1at = n1->adr_type();
 926       if (n1at != NULL) {

1572         (input_mem == NodeSentinel) ) {
1573       // Print when we exclude matching due to different memory states at input-loads
1574       if (PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
1575           && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem)) {
1576         tty->print_cr("invalid input_mem");
1577       }
1578       // Switch to a register-only opcode; this value must be in a register
1579       // and cannot be subsumed as part of a larger instruction.
1580       s->DFA( m->ideal_reg(), m );
1581 
1582     } else {
1583       // If match tree has no control and we do, adopt it for entire tree
1584       if( control == NULL && m->in(0) != NULL && m->req() > 1 )
1585         control = m->in(0);         // Pick up control
1586       // Else match as a normal part of the match tree.
1587       control = Label_Root(m, s, control, mem);
1588       if (C->failing()) return NULL;
1589     }
1590   }
1591 

1592   // Call DFA to match this node, and return
1593   svec->DFA( n->Opcode(), n );
1594 
1595 #ifdef ASSERT
1596   uint x;
1597   for( x = 0; x < _LAST_MACH_OPER; x++ )
1598     if( svec->valid(x) )
1599       break;
1600 
1601   if (x >= _LAST_MACH_OPER) {
1602     n->dump();
1603     svec->dump();
1604     assert( false, "bad AD file" );
1605   }
1606 #endif
1607   return control;
1608 }
1609 
1610 
1611 // Con nodes reduced using the same rule can share their MachNode

2421   }
2422 }
2423 
2424 bool Matcher::gen_narrow_oop_implicit_null_checks() {
2425   // Advise the matcher to perform null checks on the narrow oop side.
2426   // Implicit checks are not possible on the uncompressed oop side anyway
2427   // (at least not for read accesses).
2428   // This performs significantly better (especially on Power 6).
2429   if (!os::zero_page_read_protected()) {
2430     return true;
2431   }
2432   return CompressedOops::use_implicit_null_checks() &&
2433          (narrow_oop_use_complex_address() ||
2434           CompressedOops::base() != NULL);
2435 }
2436 
2437 // Compute RegMask for an ideal register.
2438 const RegMask* Matcher::regmask_for_ideal_register(uint ideal_reg, Node* ret) {
2439   const Type* t = Type::mreg2type[ideal_reg];
2440   if (t == NULL) {
2441     assert(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ, "not a vector: %d", ideal_reg);
2442     return NULL; // not supported
2443   }
2444   Node* fp  = ret->in(TypeFunc::FramePtr);
2445   Node* mem = ret->in(TypeFunc::Memory);
2446   const TypePtr* atp = TypePtr::BOTTOM;
2447   MemNode::MemOrd mo = MemNode::unordered;
2448 
2449   Node* spill;
2450   switch (ideal_reg) {
2451     case Op_RegN: spill = new LoadNNode(NULL, mem, fp, atp, t->is_narrowoop(), mo); break;
2452     case Op_RegI: spill = new LoadINode(NULL, mem, fp, atp, t->is_int(),       mo); break;
2453     case Op_RegP: spill = new LoadPNode(NULL, mem, fp, atp, t->is_ptr(),       mo); break;
2454     case Op_RegF: spill = new LoadFNode(NULL, mem, fp, atp, t,                 mo); break;
2455     case Op_RegD: spill = new LoadDNode(NULL, mem, fp, atp, t,                 mo); break;
2456     case Op_RegL: spill = new LoadLNode(NULL, mem, fp, atp, t->is_long(),      mo); break;
2457 
2458     case Op_VecA: // fall-through
2459     case Op_VecS: // fall-through
2460     case Op_VecD: // fall-through
2461     case Op_VecX: // fall-through
2462     case Op_VecY: // fall-through
2463     case Op_VecZ: spill = new LoadVectorNode(NULL, mem, fp, atp, t->is_vect()); break;
2464 
2465     default: ShouldNotReachHere();
2466   }
2467   MachNode* mspill = match_tree(spill);
2468   assert(mspill != NULL, "matching failed: %d", ideal_reg);
2469   // Handle generic vector operand case
2470   if (Matcher::supports_generic_vector_operands && t->isa_vect()) {
2471     specialize_mach_node(mspill);
2472   }
2473   return &mspill->out_RegMask();
2474 }
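
Note the NULL contract above: for a vector class with no Type::mreg2type entry (for example Op_VecA on a platform without scalable vectors), the function returns NULL instead of a mask. A minimal illustration of a caller honoring that contract; purely a sketch, since the real callers simply store the result:

    // Hedged sketch: probing for a possibly-unsupported vector class.
    const RegMask* rm = regmask_for_ideal_register(Op_VecA, ret);
    if (rm == NULL) {
      // No Type::mreg2type entry: this platform does not support VecA,
      // so no register mask can be derived for it.
    }
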
2475 
2476 // Process Mach IR right after selection phase is over.
2477 void Matcher::do_postselect_cleanup() {
2478   if (supports_generic_vector_operands) {