< prev index next >

src/share/vm/opto/matcher.cpp

Print this page




  66   _c_reg_save_policy(c_reg_save_policy),
  67   _register_save_type(register_save_type),
  68   _ruleName(ruleName),
  69   _allocation_started(false),
  70   _states_arena(Chunk::medium_size),
  71   _visited(&_states_arena),
  72   _shared(&_states_arena),
  73   _dontcare(&_states_arena) {
  74   C->set_matcher(this);
  75 
  76   idealreg2spillmask  [Op_RegI] = NULL;
  77   idealreg2spillmask  [Op_RegN] = NULL;
  78   idealreg2spillmask  [Op_RegL] = NULL;
  79   idealreg2spillmask  [Op_RegF] = NULL;
  80   idealreg2spillmask  [Op_RegD] = NULL;
  81   idealreg2spillmask  [Op_RegP] = NULL;
  82   idealreg2spillmask  [Op_VecS] = NULL;
  83   idealreg2spillmask  [Op_VecD] = NULL;
  84   idealreg2spillmask  [Op_VecX] = NULL;
  85   idealreg2spillmask  [Op_VecY] = NULL;

  86 
  87   idealreg2debugmask  [Op_RegI] = NULL;
  88   idealreg2debugmask  [Op_RegN] = NULL;
  89   idealreg2debugmask  [Op_RegL] = NULL;
  90   idealreg2debugmask  [Op_RegF] = NULL;
  91   idealreg2debugmask  [Op_RegD] = NULL;
  92   idealreg2debugmask  [Op_RegP] = NULL;
  93   idealreg2debugmask  [Op_VecS] = NULL;
  94   idealreg2debugmask  [Op_VecD] = NULL;
  95   idealreg2debugmask  [Op_VecX] = NULL;
  96   idealreg2debugmask  [Op_VecY] = NULL;

  97 
  98   idealreg2mhdebugmask[Op_RegI] = NULL;
  99   idealreg2mhdebugmask[Op_RegN] = NULL;
 100   idealreg2mhdebugmask[Op_RegL] = NULL;
 101   idealreg2mhdebugmask[Op_RegF] = NULL;
 102   idealreg2mhdebugmask[Op_RegD] = NULL;
 103   idealreg2mhdebugmask[Op_RegP] = NULL;
 104   idealreg2mhdebugmask[Op_VecS] = NULL;
 105   idealreg2mhdebugmask[Op_VecD] = NULL;
 106   idealreg2mhdebugmask[Op_VecX] = NULL;
 107   idealreg2mhdebugmask[Op_VecY] = NULL;

 108 
 109   debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
 110 }
 111 
 112 //------------------------------warp_incoming_stk_arg------------------------
 113 // This warps a VMReg into an OptoReg::Name
 114 OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
 115   OptoReg::Name warped;
 116   if( reg->is_stack() ) {  // Stack slot argument?
 117     warped = OptoReg::add(_old_SP, reg->reg2stack() );
 118     warped = OptoReg::add(warped, C->out_preserve_stack_slots());
 119     if( warped >= _in_arg_limit )
 120       _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
 121     if (!RegMask::can_represent_arg(warped)) {
 122       // the compiler cannot represent this method's calling sequence
 123       C->record_method_not_compilable_all_tiers("unsupported incoming calling sequence");
 124       return OptoReg::Bad;
 125     }
 126     return warped;
 127   }


 396 // course gives them a mask).
 397 
 398 static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
      // Allocate 'size' register masks in the resource arena and pre-fill the
      // fixed TypeFunc projection slots.  Control, I_O and Memory edges carry
      // no register value, so they get the empty mask; ReturnAdr and FramePtr
      // receive the caller-supplied masks for the return-address and
      // frame-pointer locations.
 399   RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
 400   // Do all the pre-defined register masks
 401   rms[TypeFunc::Control  ] = RegMask::Empty;
 402   rms[TypeFunc::I_O      ] = RegMask::Empty;
 403   rms[TypeFunc::Memory   ] = RegMask::Empty;
 404   rms[TypeFunc::ReturnAdr] = ret_adr;
 405   rms[TypeFunc::FramePtr ] = fp;
      // Only the five projections above are set here; the remaining entries
      // (TypeFunc::Parms and beyond) are NOT initialized by this function —
      // presumably the caller fills in the per-argument masks; confirm at the
      // call sites (not visible in this chunk).
 406   return rms;
 407 }
 408 
 409 //---------------------------init_first_stack_mask-----------------------------
 410 // Create the initial stack mask used by values spilling to the stack.
 411 // Disallow any debug info in outgoing argument areas by setting the
 412 // initial mask accordingly.
 413 void Matcher::init_first_stack_mask() {
 414 
 415   // Allocate storage for spill masks as masks for the appropriate load type.
 416   RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * (3*6+4));
 417 
 418   idealreg2spillmask  [Op_RegN] = &rms[0];
 419   idealreg2spillmask  [Op_RegI] = &rms[1];
 420   idealreg2spillmask  [Op_RegL] = &rms[2];
 421   idealreg2spillmask  [Op_RegF] = &rms[3];
 422   idealreg2spillmask  [Op_RegD] = &rms[4];
 423   idealreg2spillmask  [Op_RegP] = &rms[5];
 424 
 425   idealreg2debugmask  [Op_RegN] = &rms[6];
 426   idealreg2debugmask  [Op_RegI] = &rms[7];
 427   idealreg2debugmask  [Op_RegL] = &rms[8];
 428   idealreg2debugmask  [Op_RegF] = &rms[9];
 429   idealreg2debugmask  [Op_RegD] = &rms[10];
 430   idealreg2debugmask  [Op_RegP] = &rms[11];
 431 
 432   idealreg2mhdebugmask[Op_RegN] = &rms[12];
 433   idealreg2mhdebugmask[Op_RegI] = &rms[13];
 434   idealreg2mhdebugmask[Op_RegL] = &rms[14];
 435   idealreg2mhdebugmask[Op_RegF] = &rms[15];
 436   idealreg2mhdebugmask[Op_RegD] = &rms[16];
 437   idealreg2mhdebugmask[Op_RegP] = &rms[17];
 438 
 439   idealreg2spillmask  [Op_VecS] = &rms[18];
 440   idealreg2spillmask  [Op_VecD] = &rms[19];
 441   idealreg2spillmask  [Op_VecX] = &rms[20];
 442   idealreg2spillmask  [Op_VecY] = &rms[21];

 443 
 444   OptoReg::Name i;
 445 
 446   // At first, start with the empty mask
 447   C->FIRST_STACK_mask().Clear();
 448 
 449   // Add in the incoming argument area
 450   OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
 451   for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
 452     C->FIRST_STACK_mask().Insert(i);
 453   }
 454   // Add in all bits past the outgoing argument area
 455   guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
 456             "must be able to represent all call arguments in reg mask");
 457   OptoReg::Name init = _out_arg_limit;
 458   for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
 459     C->FIRST_STACK_mask().Insert(i);
 460   }
 461   // Finally, set the "infinite stack" bit.
 462   C->FIRST_STACK_mask().set_AllStack();


 507       aligned_stack_mask.Remove(in);
 508       in = OptoReg::add(in, -1);
 509     }
 510      aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
 511      assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
 512     *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
 513      idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
 514   }
 515   if (Matcher::vector_size_supported(T_FLOAT,8)) {
 516     // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
 517     OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
 518     for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
 519       aligned_stack_mask.Remove(in);
 520       in = OptoReg::add(in, -1);
 521     }
 522      aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
 523      assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
 524     *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
 525      idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
 526   }












 527    if (UseFPUForSpilling) {
 528      // This mask logic assumes that the spill operations are
 529      // symmetric and that the registers involved are the same size.
 530      // On sparc for instance we may have to use 64 bit moves will
 531      // kill 2 registers when used with F0-F31.
 532      idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
 533      idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
 534 #ifdef _LP64
 535      idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
 536      idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
 537      idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
 538      idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
 539 #else
 540      idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
 541 #ifdef ARM
 542      // ARM has support for moving 64bit values between a pair of
 543      // integer registers and a double register
 544      idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
 545      idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
 546 #endif


 844   idealreg2regmask[Op_RegD] = &spillD->out_RegMask();
 845   idealreg2regmask[Op_RegP] = &spillP->out_RegMask();
 846 
 847   // Vector regmasks.
 848   if (Matcher::vector_size_supported(T_BYTE,4)) {
 849     TypeVect::VECTS = TypeVect::make(T_BYTE, 4);
 850     MachNode *spillVectS = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTS));
 851     idealreg2regmask[Op_VecS] = &spillVectS->out_RegMask();
 852   }
 853   if (Matcher::vector_size_supported(T_FLOAT,2)) {
 854     MachNode *spillVectD = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTD));
 855     idealreg2regmask[Op_VecD] = &spillVectD->out_RegMask();
 856   }
 857   if (Matcher::vector_size_supported(T_FLOAT,4)) {
 858     MachNode *spillVectX = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTX));
 859     idealreg2regmask[Op_VecX] = &spillVectX->out_RegMask();
 860   }
 861   if (Matcher::vector_size_supported(T_FLOAT,8)) {
 862     MachNode *spillVectY = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTY));
 863     idealreg2regmask[Op_VecY] = &spillVectY->out_RegMask();




 864   }
 865 }
 866 
 867 #ifdef ASSERT
 868 static void match_alias_type(Compile* C, Node* n, Node* m) {
 869   if (!VerifyAliases)  return;  // do not go looking for trouble by default
 870   const TypePtr* nat = n->adr_type();
 871   const TypePtr* mat = m->adr_type();
 872   int nidx = C->get_alias_index(nat);
 873   int midx = C->get_alias_index(mat);
 874   // Detune the assert for cases like (AndI 0xFF (LoadB p)).
 875   if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
 876     for (uint i = 1; i < n->req(); i++) {
 877       Node* n1 = n->in(i);
 878       const TypePtr* n1at = n1->adr_type();
 879       if (n1at != NULL) {
 880         nat = n1at;
 881         nidx = C->get_alias_index(n1at);
 882       }
 883     }




  66   _c_reg_save_policy(c_reg_save_policy),
  67   _register_save_type(register_save_type),
  68   _ruleName(ruleName),
  69   _allocation_started(false),
  70   _states_arena(Chunk::medium_size),
  71   _visited(&_states_arena),
  72   _shared(&_states_arena),
  73   _dontcare(&_states_arena) {
  74   C->set_matcher(this);
  75 
  76   idealreg2spillmask  [Op_RegI] = NULL;
  77   idealreg2spillmask  [Op_RegN] = NULL;
  78   idealreg2spillmask  [Op_RegL] = NULL;
  79   idealreg2spillmask  [Op_RegF] = NULL;
  80   idealreg2spillmask  [Op_RegD] = NULL;
  81   idealreg2spillmask  [Op_RegP] = NULL;
  82   idealreg2spillmask  [Op_VecS] = NULL;
  83   idealreg2spillmask  [Op_VecD] = NULL;
  84   idealreg2spillmask  [Op_VecX] = NULL;
  85   idealreg2spillmask  [Op_VecY] = NULL;
  86   idealreg2spillmask  [Op_VecZ] = NULL;
  87 
  88   idealreg2debugmask  [Op_RegI] = NULL;
  89   idealreg2debugmask  [Op_RegN] = NULL;
  90   idealreg2debugmask  [Op_RegL] = NULL;
  91   idealreg2debugmask  [Op_RegF] = NULL;
  92   idealreg2debugmask  [Op_RegD] = NULL;
  93   idealreg2debugmask  [Op_RegP] = NULL;
  94   idealreg2debugmask  [Op_VecS] = NULL;
  95   idealreg2debugmask  [Op_VecD] = NULL;
  96   idealreg2debugmask  [Op_VecX] = NULL;
  97   idealreg2debugmask  [Op_VecY] = NULL;
  98   idealreg2debugmask  [Op_VecZ] = NULL;
  99 
 100   idealreg2mhdebugmask[Op_RegI] = NULL;
 101   idealreg2mhdebugmask[Op_RegN] = NULL;
 102   idealreg2mhdebugmask[Op_RegL] = NULL;
 103   idealreg2mhdebugmask[Op_RegF] = NULL;
 104   idealreg2mhdebugmask[Op_RegD] = NULL;
 105   idealreg2mhdebugmask[Op_RegP] = NULL;
 106   idealreg2mhdebugmask[Op_VecS] = NULL;
 107   idealreg2mhdebugmask[Op_VecD] = NULL;
 108   idealreg2mhdebugmask[Op_VecX] = NULL;
 109   idealreg2mhdebugmask[Op_VecY] = NULL;
 110   idealreg2mhdebugmask[Op_VecZ] = NULL;
 111 
 112   debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
 113 }
 114 
 115 //------------------------------warp_incoming_stk_arg------------------------
 116 // This warps a VMReg into an OptoReg::Name
 117 OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
 118   OptoReg::Name warped;
 119   if( reg->is_stack() ) {  // Stack slot argument?
 120     warped = OptoReg::add(_old_SP, reg->reg2stack() );
 121     warped = OptoReg::add(warped, C->out_preserve_stack_slots());
 122     if( warped >= _in_arg_limit )
 123       _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
 124     if (!RegMask::can_represent_arg(warped)) {
 125       // the compiler cannot represent this method's calling sequence
 126       C->record_method_not_compilable_all_tiers("unsupported incoming calling sequence");
 127       return OptoReg::Bad;
 128     }
 129     return warped;
 130   }


 399 // course gives them a mask).
 400 
 401 static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
      // Allocate 'size' register masks in the resource arena and pre-fill the
      // fixed TypeFunc projection slots.  Control, I_O and Memory edges carry
      // no register value, so they get the empty mask; ReturnAdr and FramePtr
      // receive the caller-supplied masks for the return-address and
      // frame-pointer locations.
 402   RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
 403   // Do all the pre-defined register masks
 404   rms[TypeFunc::Control  ] = RegMask::Empty;
 405   rms[TypeFunc::I_O      ] = RegMask::Empty;
 406   rms[TypeFunc::Memory   ] = RegMask::Empty;
 407   rms[TypeFunc::ReturnAdr] = ret_adr;
 408   rms[TypeFunc::FramePtr ] = fp;
      // Only the five projections above are set here; the remaining entries
      // (TypeFunc::Parms and beyond) are NOT initialized by this function —
      // presumably the caller fills in the per-argument masks; confirm at the
      // call sites (not visible in this chunk).
 409   return rms;
 410 }
 411 
 412 //---------------------------init_first_stack_mask-----------------------------
 413 // Create the initial stack mask used by values spilling to the stack.
 414 // Disallow any debug info in outgoing argument areas by setting the
 415 // initial mask accordingly.
 416 void Matcher::init_first_stack_mask() {
 417 
 418   // Allocate storage for spill masks as masks for the appropriate load type.
 419   RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * (3*6+5));
 420 
 421   idealreg2spillmask  [Op_RegN] = &rms[0];
 422   idealreg2spillmask  [Op_RegI] = &rms[1];
 423   idealreg2spillmask  [Op_RegL] = &rms[2];
 424   idealreg2spillmask  [Op_RegF] = &rms[3];
 425   idealreg2spillmask  [Op_RegD] = &rms[4];
 426   idealreg2spillmask  [Op_RegP] = &rms[5];
 427 
 428   idealreg2debugmask  [Op_RegN] = &rms[6];
 429   idealreg2debugmask  [Op_RegI] = &rms[7];
 430   idealreg2debugmask  [Op_RegL] = &rms[8];
 431   idealreg2debugmask  [Op_RegF] = &rms[9];
 432   idealreg2debugmask  [Op_RegD] = &rms[10];
 433   idealreg2debugmask  [Op_RegP] = &rms[11];
 434 
 435   idealreg2mhdebugmask[Op_RegN] = &rms[12];
 436   idealreg2mhdebugmask[Op_RegI] = &rms[13];
 437   idealreg2mhdebugmask[Op_RegL] = &rms[14];
 438   idealreg2mhdebugmask[Op_RegF] = &rms[15];
 439   idealreg2mhdebugmask[Op_RegD] = &rms[16];
 440   idealreg2mhdebugmask[Op_RegP] = &rms[17];
 441 
 442   idealreg2spillmask  [Op_VecS] = &rms[18];
 443   idealreg2spillmask  [Op_VecD] = &rms[19];
 444   idealreg2spillmask  [Op_VecX] = &rms[20];
 445   idealreg2spillmask  [Op_VecY] = &rms[21];
 446   idealreg2spillmask  [Op_VecZ] = &rms[22];
 447 
 448   OptoReg::Name i;
 449 
 450   // At first, start with the empty mask
 451   C->FIRST_STACK_mask().Clear();
 452 
 453   // Add in the incoming argument area
 454   OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
 455   for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
 456     C->FIRST_STACK_mask().Insert(i);
 457   }
 458   // Add in all bits past the outgoing argument area
 459   guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
 460             "must be able to represent all call arguments in reg mask");
 461   OptoReg::Name init = _out_arg_limit;
 462   for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
 463     C->FIRST_STACK_mask().Insert(i);
 464   }
 465   // Finally, set the "infinite stack" bit.
 466   C->FIRST_STACK_mask().set_AllStack();


 511       aligned_stack_mask.Remove(in);
 512       in = OptoReg::add(in, -1);
 513     }
 514      aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
 515      assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
 516     *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
 517      idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
 518   }
 519   if (Matcher::vector_size_supported(T_FLOAT,8)) {
 520     // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
 521     OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
 522     for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
 523       aligned_stack_mask.Remove(in);
 524       in = OptoReg::add(in, -1);
 525     }
 526      aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
 527      assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
 528     *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
 529      idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
 530   }
 531   if (Matcher::vector_size_supported(T_FLOAT,16)) {
 532     // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
 533     OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
 534     for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
 535       aligned_stack_mask.Remove(in);
 536       in = OptoReg::add(in, -1);
 537     }
 538      aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
 539      assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
 540     *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
 541      idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
 542   }
 543    if (UseFPUForSpilling) {
 544      // This mask logic assumes that the spill operations are
 545      // symmetric and that the registers involved are the same size.
 546      // On sparc for instance we may have to use 64 bit moves will
 547      // kill 2 registers when used with F0-F31.
 548      idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
 549      idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
 550 #ifdef _LP64
 551      idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
 552      idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
 553      idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
 554      idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
 555 #else
 556      idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
 557 #ifdef ARM
 558      // ARM has support for moving 64bit values between a pair of
 559      // integer registers and a double register
 560      idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
 561      idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
 562 #endif


 860   idealreg2regmask[Op_RegD] = &spillD->out_RegMask();
 861   idealreg2regmask[Op_RegP] = &spillP->out_RegMask();
 862 
 863   // Vector regmasks.
 864   if (Matcher::vector_size_supported(T_BYTE,4)) {
 865     TypeVect::VECTS = TypeVect::make(T_BYTE, 4);
 866     MachNode *spillVectS = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTS));
 867     idealreg2regmask[Op_VecS] = &spillVectS->out_RegMask();
 868   }
 869   if (Matcher::vector_size_supported(T_FLOAT,2)) {
 870     MachNode *spillVectD = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTD));
 871     idealreg2regmask[Op_VecD] = &spillVectD->out_RegMask();
 872   }
 873   if (Matcher::vector_size_supported(T_FLOAT,4)) {
 874     MachNode *spillVectX = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTX));
 875     idealreg2regmask[Op_VecX] = &spillVectX->out_RegMask();
 876   }
 877   if (Matcher::vector_size_supported(T_FLOAT,8)) {
 878     MachNode *spillVectY = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTY));
 879     idealreg2regmask[Op_VecY] = &spillVectY->out_RegMask();
 880   }
 881   if (Matcher::vector_size_supported(T_FLOAT,16)) {
 882     MachNode *spillVectZ = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTZ));
 883     idealreg2regmask[Op_VecZ] = &spillVectZ->out_RegMask();
 884   }
 885 }
 886 
 887 #ifdef ASSERT
 888 static void match_alias_type(Compile* C, Node* n, Node* m) {
 889   if (!VerifyAliases)  return;  // do not go looking for trouble by default
 890   const TypePtr* nat = n->adr_type();
 891   const TypePtr* mat = m->adr_type();
 892   int nidx = C->get_alias_index(nat);
 893   int midx = C->get_alias_index(mat);
 894   // Detune the assert for cases like (AndI 0xFF (LoadB p)).
 895   if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
 896     for (uint i = 1; i < n->req(); i++) {
 897       Node* n1 = n->in(i);
 898       const TypePtr* n1at = n1->adr_type();
 899       if (n1at != NULL) {
 900         nat = n1at;
 901         nidx = C->get_alias_index(n1at);
 902       }
 903     }


< prev index next >