406 //------------------------------Fixup_Save_On_Entry----------------------------
407 // The stated purpose of this routine is to take care of save-on-entry
408 // registers. However, the overall goal of the Match phase is to convert into
409 // machine-specific instructions which have RegMasks to guide allocation.
410 // So what this procedure really does is put a valid RegMask on each input
411 // to the machine-specific variations of all Return, TailCall and Halt
412 // instructions. It also adds edges to define the save-on-entry values (and of
413 // course gives them a mask).
414
415 static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
416 RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
417 // Do all the pre-defined register masks
418 rms[TypeFunc::Control ] = RegMask::Empty;
419 rms[TypeFunc::I_O ] = RegMask::Empty;
420 rms[TypeFunc::Memory ] = RegMask::Empty;
421 rms[TypeFunc::ReturnAdr] = ret_adr;
422 rms[TypeFunc::FramePtr ] = fp;
423 return rms;
424 }
425
426 #define NOF_STACK_MASKS (3*6+5)
427
428 // Create the initial stack mask used by values spilling to the stack.
429 // Disallow any debug info in outgoing argument areas by setting the
430 // initial mask accordingly.
431 void Matcher::init_first_stack_mask() {
432
433 // Allocate storage for spill masks as masks for the appropriate load type.
434 RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * NOF_STACK_MASKS);
435
436 // Initialize empty placeholder masks into the newly allocated arena
437 for (int i = 0; i < NOF_STACK_MASKS; i++) {
438 new (rms + i) RegMask();
439 }
440
441 idealreg2spillmask [Op_RegN] = &rms[0];
442 idealreg2spillmask [Op_RegI] = &rms[1];
443 idealreg2spillmask [Op_RegL] = &rms[2];
444 idealreg2spillmask [Op_RegF] = &rms[3];
445 idealreg2spillmask [Op_RegD] = &rms[4];
446 idealreg2spillmask [Op_RegP] = &rms[5];
448 idealreg2debugmask [Op_RegN] = &rms[6];
449 idealreg2debugmask [Op_RegI] = &rms[7];
450 idealreg2debugmask [Op_RegL] = &rms[8];
451 idealreg2debugmask [Op_RegF] = &rms[9];
452 idealreg2debugmask [Op_RegD] = &rms[10];
453 idealreg2debugmask [Op_RegP] = &rms[11];
454
455 idealreg2mhdebugmask[Op_RegN] = &rms[12];
456 idealreg2mhdebugmask[Op_RegI] = &rms[13];
457 idealreg2mhdebugmask[Op_RegL] = &rms[14];
458 idealreg2mhdebugmask[Op_RegF] = &rms[15];
459 idealreg2mhdebugmask[Op_RegD] = &rms[16];
460 idealreg2mhdebugmask[Op_RegP] = &rms[17];
461
462 idealreg2spillmask [Op_VecS] = &rms[18];
463 idealreg2spillmask [Op_VecD] = &rms[19];
464 idealreg2spillmask [Op_VecX] = &rms[20];
465 idealreg2spillmask [Op_VecY] = &rms[21];
466 idealreg2spillmask [Op_VecZ] = &rms[22];
467
468 OptoReg::Name i;
469
470 // At first, start with the empty mask
471 C->FIRST_STACK_mask().Clear();
472
473 // Add in the incoming argument area
474 OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
475 for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
476 C->FIRST_STACK_mask().Insert(i);
477 }
478 // Add in all bits past the outgoing argument area
479 guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
480 "must be able to represent all call arguments in reg mask");
481 OptoReg::Name init = _out_arg_limit;
482 for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
483 C->FIRST_STACK_mask().Insert(i);
484 }
485 // Finally, set the "infinite stack" bit.
486 C->FIRST_STACK_mask().set_AllStack();
487
494 *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
495 #ifdef _LP64
496 *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
497 idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
498 idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
499 #else
500 idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
501 #endif
502 *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
503 idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
504 *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
505 idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
506 *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
507 idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
508 *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
509 idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);
510
511 if (Matcher::vector_size_supported(T_BYTE,4)) {
512 *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
513 idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
514 }
515 if (Matcher::vector_size_supported(T_FLOAT,2)) {
516 // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
517 // RA guarantees such alignment since it is needed for Double and Long values.
518 *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
519 idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
520 }
521 if (Matcher::vector_size_supported(T_FLOAT,4)) {
522 // For VecX we need quadro alignment and 16 bytes (4 slots) for spills.
523 //
524 // RA can use input arguments stack slots for spills but until RA
525 // we don't know frame size and offset of input arg stack slots.
526 //
527 // Exclude last input arg stack slots to avoid spilling vectors there
528 // otherwise vector spills could stomp over stack slots in caller frame.
529 OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
530 for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
531 aligned_stack_mask.Remove(in);
532 in = OptoReg::add(in, -1);
533 }
534 aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
535 assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
536 *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
537 idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
538 }
539 if (Matcher::vector_size_supported(T_FLOAT,8)) {
540 // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
541 OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
542 for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
543 aligned_stack_mask.Remove(in);
544 in = OptoReg::add(in, -1);
545 }
546 aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
547 assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
548 *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
549 idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
550 }
551 if (Matcher::vector_size_supported(T_FLOAT,16)) {
552 // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
553 OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
554 for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
555 aligned_stack_mask.Remove(in);
556 in = OptoReg::add(in, -1);
557 }
558 aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
559 assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
560 *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
561 idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
562 }
563 if (UseFPUForSpilling) {
564 // This mask logic assumes that the spill operations are
565 // symmetric and that the registers involved are the same size.
566 // On sparc for instance we may have to use 64 bit moves will
567 // kill 2 registers when used with F0-F31.
568 idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
569 idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
570 #ifdef _LP64
571 idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
572 idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
573 idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
574 idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
575 #else
576 idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
577 #ifdef ARM
578 // ARM has support for moving 64bit values between a pair of
579 // integer registers and a double register
580 idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
581 idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
582 #endif
583 #endif
584 }
585
586 // Make up debug masks. Any spill slot plus callee-save registers.
587 // Caller-save registers are assumed to be trashable by the various
588 // inline-cache fixup routines.
589 *idealreg2debugmask [Op_RegN]= *idealreg2spillmask[Op_RegN];
590 *idealreg2debugmask [Op_RegI]= *idealreg2spillmask[Op_RegI];
591 *idealreg2debugmask [Op_RegL]= *idealreg2spillmask[Op_RegL];
592 *idealreg2debugmask [Op_RegF]= *idealreg2spillmask[Op_RegF];
593 *idealreg2debugmask [Op_RegD]= *idealreg2spillmask[Op_RegD];
594 *idealreg2debugmask [Op_RegP]= *idealreg2spillmask[Op_RegP];
595
596 *idealreg2mhdebugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
597 *idealreg2mhdebugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
598 *idealreg2mhdebugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
599 *idealreg2mhdebugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
600 *idealreg2mhdebugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
601 *idealreg2mhdebugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];
602
603 // Prevent stub compilations from attempting to reference
604 // callee-saved registers from debug info
605 bool exclude_soe = !Compile::current()->is_method_compilation();
606
607 for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
608 // registers the caller has to save do not work
609 if( _register_save_policy[i] == 'C' ||
610 _register_save_policy[i] == 'A' ||
611 (_register_save_policy[i] == 'E' && exclude_soe) ) {
612 idealreg2debugmask [Op_RegN]->Remove(i);
613 idealreg2debugmask [Op_RegI]->Remove(i); // Exclude save-on-call
614 idealreg2debugmask [Op_RegL]->Remove(i); // registers from debug
615 idealreg2debugmask [Op_RegF]->Remove(i); // masks
616 idealreg2debugmask [Op_RegD]->Remove(i);
617 idealreg2debugmask [Op_RegP]->Remove(i);
618
619 idealreg2mhdebugmask[Op_RegN]->Remove(i);
620 idealreg2mhdebugmask[Op_RegI]->Remove(i);
621 idealreg2mhdebugmask[Op_RegL]->Remove(i);
622 idealreg2mhdebugmask[Op_RegF]->Remove(i);
623 idealreg2mhdebugmask[Op_RegD]->Remove(i);
624 idealreg2mhdebugmask[Op_RegP]->Remove(i);
625 }
626 }
627
628 // Subtract the register we use to save the SP for MethodHandle
629 // invokes to from the debug mask.
630 const RegMask save_mask = method_handle_invoke_SP_save_mask();
631 idealreg2mhdebugmask[Op_RegN]->SUBTRACT(save_mask);
632 idealreg2mhdebugmask[Op_RegI]->SUBTRACT(save_mask);
633 idealreg2mhdebugmask[Op_RegL]->SUBTRACT(save_mask);
634 idealreg2mhdebugmask[Op_RegF]->SUBTRACT(save_mask);
635 idealreg2mhdebugmask[Op_RegD]->SUBTRACT(save_mask);
636 idealreg2mhdebugmask[Op_RegP]->SUBTRACT(save_mask);
637 }
638
639 //---------------------------is_save_on_entry----------------------------------
640 bool Matcher::is_save_on_entry( int reg ) {
641 return
642 _register_save_policy[reg] == 'E' ||
643 _register_save_policy[reg] == 'A' || // Save-on-entry register?
644 // Also save argument registers in the trampolining stubs
645 (C->save_argument_registers() && is_spillable_arg(reg));
646 }
647
648 //---------------------------Fixup_Save_On_Entry-------------------------------
649 void Matcher::Fixup_Save_On_Entry( ) {
650 init_first_stack_mask();
651
652 Node *root = C->root(); // Short name for root
653 // Count number of save-on-entry registers.
654 uint soe_cnt = number_of_saved_registers();
655 uint i;
656
1909
1910 //------------------------------find_receiver----------------------------------
1911 // For a given signature, return the OptoReg for parameter 0.
1912 OptoReg::Name Matcher::find_receiver( bool is_outgoing ) {
1913 VMRegPair regs;
1914 BasicType sig_bt = T_OBJECT;
1915 calling_convention(&sig_bt, ®s, 1, is_outgoing);
1916 // Return argument 0 register. In the LP64 build pointers
1917 // take 2 registers, but the VM wants only the 'main' name.
1918 return OptoReg::as_OptoReg(regs.first());
1919 }
1920
1921 bool Matcher::is_vshift_con_pattern(Node *n, Node *m) {
1922 if (n != NULL && m != NULL) {
1923 return VectorNode::is_vector_shift(n) &&
1924 VectorNode::is_vector_shift_count(m) && m->in(1)->is_Con();
1925 }
1926 return false;
1927 }
1928
1929
1930 bool Matcher::clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
1931 // Must clone all producers of flags, or we will not match correctly.
1932 // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
1933 // then it will match into an ideal Op_RegFlags. Alas, the fp-flags
1934 // are also there, so we may match a float-branch to int-flags and
1935 // expect the allocator to haul the flags from the int-side to the
1936 // fp-side. No can do.
1937 if (_must_clone[m->Opcode()]) {
1938 mstack.push(m, Visit);
1939 return true;
1940 }
1941 return pd_clone_node(n, m, mstack);
1942 }
1943
1944 bool Matcher::clone_base_plus_offset_address(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
1945 Node *off = m->in(AddPNode::Offset);
1946 if (off->is_Con()) {
1947 address_visited.test_set(m->_idx); // Flag as address_visited
1948 mstack.push(m->in(AddPNode::Address), Pre_Visit);
1949 // Clone X+offset as it also folds into most addressing expressions
2247 case Op_StrCompressedCopy:
2248 case Op_StrInflatedCopy:
2249 case Op_EncodeISOArray: {
2250 // Restructure into a binary tree for Matching.
2251 Node* pair = new BinaryNode(n->in(3), n->in(4));
2252 n->set_req(3, pair);
2253 n->del_req(4);
2254 break;
2255 }
2256 case Op_FmaD:
2257 case Op_FmaF:
2258 case Op_FmaVD:
2259 case Op_FmaVF: {
2260 // Restructure into a binary tree for Matching.
2261 Node* pair = new BinaryNode(n->in(1), n->in(2));
2262 n->set_req(2, pair);
2263 n->set_req(1, n->in(3));
2264 n->del_req(3);
2265 break;
2266 }
2267 case Op_MulAddS2I: {
2268 Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2269 Node* pair2 = new BinaryNode(n->in(3), n->in(4));
2270 n->set_req(1, pair1);
2271 n->set_req(2, pair2);
2272 n->del_req(4);
2273 n->del_req(3);
2274 break;
2275 }
2276 default:
2277 break;
2278 }
2279 }
2280
2281 #ifdef ASSERT
2282 // machine-independent root to machine-dependent root
// Debug-only dump of the ideal-node -> mach-node mapping built by matching.
2283 void Matcher::dump_old2new_map() {
2284   _old2new_map.dump();
2285 }
2286 #endif
2287
2288 //---------------------------collect_null_checks-------------------------------
2289 // Find null checks in the ideal graph; write a machine-specific node for
2290 // it. Used by later implicit-null-check handling. Actually collects
2291 // either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
2292 // value being tested.
2293 void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
2294 Node *iff = proj->in(0);
2295 if( iff->Opcode() == Op_If ) {
|
406 //------------------------------Fixup_Save_On_Entry----------------------------
407 // The stated purpose of this routine is to take care of save-on-entry
408 // registers. However, the overall goal of the Match phase is to convert into
409 // machine-specific instructions which have RegMasks to guide allocation.
410 // So what this procedure really does is put a valid RegMask on each input
411 // to the machine-specific variations of all Return, TailCall and Halt
412 // instructions. It also adds edges to define the save-on-entry values (and of
413 // course gives them a mask).
414
// Allocate 'size' RegMasks in the resource arena and pre-fill the five
// fixed TypeFunc input slots: Control/I_O/Memory get empty masks, while
// ReturnAdr and FramePtr get the caller-supplied masks.  Any remaining
// slots are left for the caller to fill in.
415 static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
416   RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
417   // Do all the pre-defined register masks
418   rms[TypeFunc::Control ] = RegMask::Empty;
419   rms[TypeFunc::I_O ] = RegMask::Empty;
420   rms[TypeFunc::Memory ] = RegMask::Empty;
421   rms[TypeFunc::ReturnAdr] = ret_adr;
422   rms[TypeFunc::FramePtr ] = fp;
423   return rms;
424 }
425
// 3 mask flavors (spill, debug, method-handle debug) for each of the 11
// spillable ideal register classes (RegN/I/L/F/D/P and VecS/D/X/Y/Z).
426 #define NOF_STACK_MASKS (3*11)
427
428 // Create the initial stack mask used by values spilling to the stack.
429 // Disallow any debug info in outgoing argument areas by setting the
430 // initial mask accordingly.
431 void Matcher::init_first_stack_mask() {
432
433   // Allocate storage for spill masks as masks for the appropriate load type.
434   RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * NOF_STACK_MASKS);
435
436   // Initialize empty placeholder masks into the newly allocated arena
437   for (int i = 0; i < NOF_STACK_MASKS; i++) {
438     new (rms + i) RegMask();
439   }
440
441   idealreg2spillmask [Op_RegN] = &rms[0];
442   idealreg2spillmask [Op_RegI] = &rms[1];
443   idealreg2spillmask [Op_RegL] = &rms[2];
444   idealreg2spillmask [Op_RegF] = &rms[3];
445   idealreg2spillmask [Op_RegD] = &rms[4];
446   idealreg2spillmask [Op_RegP] = &rms[5];
448   idealreg2debugmask [Op_RegN] = &rms[6];
449   idealreg2debugmask [Op_RegI] = &rms[7];
450   idealreg2debugmask [Op_RegL] = &rms[8];
451   idealreg2debugmask [Op_RegF] = &rms[9];
452   idealreg2debugmask [Op_RegD] = &rms[10];
453   idealreg2debugmask [Op_RegP] = &rms[11];
454
455   idealreg2mhdebugmask[Op_RegN] = &rms[12];
456   idealreg2mhdebugmask[Op_RegI] = &rms[13];
457   idealreg2mhdebugmask[Op_RegL] = &rms[14];
458   idealreg2mhdebugmask[Op_RegF] = &rms[15];
459   idealreg2mhdebugmask[Op_RegD] = &rms[16];
460   idealreg2mhdebugmask[Op_RegP] = &rms[17];
461
462   idealreg2spillmask [Op_VecS] = &rms[18];
463   idealreg2spillmask [Op_VecD] = &rms[19];
464   idealreg2spillmask [Op_VecX] = &rms[20];
465   idealreg2spillmask [Op_VecY] = &rms[21];
466   idealreg2spillmask [Op_VecZ] = &rms[22];
467
// Vector values can appear in debug info, so the vector classes get debug
// and method-handle debug masks as well.
468   idealreg2debugmask [Op_VecS] = &rms[23];
469   idealreg2debugmask [Op_VecD] = &rms[24];
470   idealreg2debugmask [Op_VecX] = &rms[25];
471   idealreg2debugmask [Op_VecY] = &rms[26];
472   idealreg2debugmask [Op_VecZ] = &rms[27];
473
474   idealreg2mhdebugmask[Op_VecS] = &rms[28];
475   idealreg2mhdebugmask[Op_VecD] = &rms[29];
476   idealreg2mhdebugmask[Op_VecX] = &rms[30];
477   idealreg2mhdebugmask[Op_VecY] = &rms[31];
478   idealreg2mhdebugmask[Op_VecZ] = &rms[32];
479
480   OptoReg::Name i;
481
482   // At first, start with the empty mask
483   C->FIRST_STACK_mask().Clear();
484
485   // Add in the incoming argument area
486   OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
487   for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
488     C->FIRST_STACK_mask().Insert(i);
489   }
490   // Add in all bits past the outgoing argument area
491   guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
492             "must be able to represent all call arguments in reg mask");
493   OptoReg::Name init = _out_arg_limit;
494   for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
495     C->FIRST_STACK_mask().Insert(i);
496   }
497   // Finally, set the "infinite stack" bit.
498   C->FIRST_STACK_mask().set_AllStack();
499
// NOTE(review): the code below uses aligned_stack_mask, whose setup
// (presumably a copy of FIRST_STACK_mask() cleared to pairs, per the
// standard HotSpot sequence) is not visible in this excerpt -- confirm
// against the full file.
506   *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
507 #ifdef _LP64
508   *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
509    idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
510    idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
511 #else
512    idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
513 #endif
514   *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
515    idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
516   *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
517    idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
518   *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
519    idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
520   *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
521    idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);
522
523   if (Matcher::vector_size_supported(T_BYTE,4)) {
524     *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
525      idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
526   } else {
// Unsupported vector sizes get an explicitly empty spill mask so the
// debug-mask copies below are well-defined.
527     *idealreg2spillmask[Op_VecS] = RegMask::Empty;
528   }
529
530   if (Matcher::vector_size_supported(T_FLOAT,2)) {
531     // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
532     // RA guarantees such alignment since it is needed for Double and Long values.
533     *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
534      idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
535   } else {
536     *idealreg2spillmask[Op_VecD] = RegMask::Empty;
537   }
538
539   if (Matcher::vector_size_supported(T_FLOAT,4)) {
540     // For VecX we need quadro alignment and 16 bytes (4 slots) for spills.
541     //
542     // RA can use input arguments stack slots for spills but until RA
543     // we don't know frame size and offset of input arg stack slots.
544     //
545     // Exclude last input arg stack slots to avoid spilling vectors there
546     // otherwise vector spills could stomp over stack slots in caller frame.
547     OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
548     for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
549       aligned_stack_mask.Remove(in);
550       in = OptoReg::add(in, -1);
551     }
552     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
553     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
554     *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
555      idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
556   } else {
557     *idealreg2spillmask[Op_VecX] = RegMask::Empty;
558   }
559
560   if (Matcher::vector_size_supported(T_FLOAT,8)) {
561     // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
562     OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
563     for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
564       aligned_stack_mask.Remove(in);
565       in = OptoReg::add(in, -1);
566     }
567     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
568     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
569     *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
570      idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
571   } else {
572     *idealreg2spillmask[Op_VecY] = RegMask::Empty;
573   }
574
575   if (Matcher::vector_size_supported(T_FLOAT,16)) {
576     // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
577     OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
578     for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
579       aligned_stack_mask.Remove(in);
580       in = OptoReg::add(in, -1);
581     }
582     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
583     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
584     *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
585      idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
586   } else {
587     *idealreg2spillmask[Op_VecZ] = RegMask::Empty;
588   }
589
590   if (UseFPUForSpilling) {
591     // This mask logic assumes that the spill operations are
592     // symmetric and that the registers involved are the same size.
593     // On sparc for instance we may have to use 64 bit moves will
594     // kill 2 registers when used with F0-F31.
595     idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
596     idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
597 #ifdef _LP64
598     idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
599     idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
600     idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
601     idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
602 #else
603     idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
604 #ifdef ARM
605     // ARM has support for moving 64bit values between a pair of
606     // integer registers and a double register
607     idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
608     idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
609 #endif
610 #endif
611   }
612
613   // Make up debug masks.  Any spill slot plus callee-save registers.
614   // Caller-save registers are assumed to be trashable by the various
615   // inline-cache fixup routines.
616   *idealreg2debugmask [Op_RegN]= *idealreg2spillmask[Op_RegN];
617   *idealreg2debugmask [Op_RegI]= *idealreg2spillmask[Op_RegI];
618   *idealreg2debugmask [Op_RegL]= *idealreg2spillmask[Op_RegL];
619   *idealreg2debugmask [Op_RegF]= *idealreg2spillmask[Op_RegF];
620   *idealreg2debugmask [Op_RegD]= *idealreg2spillmask[Op_RegD];
621   *idealreg2debugmask [Op_RegP]= *idealreg2spillmask[Op_RegP];
622
623   *idealreg2debugmask [Op_VecS]= *idealreg2spillmask[Op_VecS];
624   *idealreg2debugmask [Op_VecD]= *idealreg2spillmask[Op_VecD];
625   *idealreg2debugmask [Op_VecX]= *idealreg2spillmask[Op_VecX];
626   *idealreg2debugmask [Op_VecY]= *idealreg2spillmask[Op_VecY];
627   *idealreg2debugmask [Op_VecZ]= *idealreg2spillmask[Op_VecZ];
628
629   *idealreg2mhdebugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
630   *idealreg2mhdebugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
631   *idealreg2mhdebugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
632   *idealreg2mhdebugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
633   *idealreg2mhdebugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
634   *idealreg2mhdebugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];
635
636   *idealreg2mhdebugmask[Op_VecS]= *idealreg2spillmask[Op_VecS];
637   *idealreg2mhdebugmask[Op_VecD]= *idealreg2spillmask[Op_VecD];
638   *idealreg2mhdebugmask[Op_VecX]= *idealreg2spillmask[Op_VecX];
639   *idealreg2mhdebugmask[Op_VecY]= *idealreg2spillmask[Op_VecY];
640   *idealreg2mhdebugmask[Op_VecZ]= *idealreg2spillmask[Op_VecZ];
641
642   // Prevent stub compilations from attempting to reference
643   // callee-saved registers from debug info
644   bool exclude_soe = !Compile::current()->is_method_compilation();
645
646   for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
647     // registers the caller has to save do not work
648     if( _register_save_policy[i] == 'C' ||
649         _register_save_policy[i] == 'A' ||
650         (_register_save_policy[i] == 'E' && exclude_soe) ) {
651       idealreg2debugmask [Op_RegN]->Remove(i);
652       idealreg2debugmask [Op_RegI]->Remove(i); // Exclude save-on-call
653       idealreg2debugmask [Op_RegL]->Remove(i); // registers from debug
654       idealreg2debugmask [Op_RegF]->Remove(i); // masks
655       idealreg2debugmask [Op_RegD]->Remove(i);
656       idealreg2debugmask [Op_RegP]->Remove(i);
657       idealreg2debugmask [Op_VecS]->Remove(i);
658       idealreg2debugmask [Op_VecD]->Remove(i);
659       idealreg2debugmask [Op_VecX]->Remove(i);
660       idealreg2debugmask [Op_VecY]->Remove(i);
661       idealreg2debugmask [Op_VecZ]->Remove(i);
662
663       idealreg2mhdebugmask[Op_RegN]->Remove(i);
664       idealreg2mhdebugmask[Op_RegI]->Remove(i);
665       idealreg2mhdebugmask[Op_RegL]->Remove(i);
666       idealreg2mhdebugmask[Op_RegF]->Remove(i);
667       idealreg2mhdebugmask[Op_RegD]->Remove(i);
668       idealreg2mhdebugmask[Op_RegP]->Remove(i);
669       idealreg2mhdebugmask[Op_VecS]->Remove(i);
670       idealreg2mhdebugmask[Op_VecD]->Remove(i);
671       idealreg2mhdebugmask[Op_VecX]->Remove(i);
672       idealreg2mhdebugmask[Op_VecY]->Remove(i);
673       idealreg2mhdebugmask[Op_VecZ]->Remove(i);
674     }
675   }
676
677   // Subtract the register we use to save the SP for MethodHandle
678   // invokes from the debug masks.
679   const RegMask save_mask = method_handle_invoke_SP_save_mask();
680   idealreg2mhdebugmask[Op_RegN]->SUBTRACT(save_mask);
681   idealreg2mhdebugmask[Op_RegI]->SUBTRACT(save_mask);
682   idealreg2mhdebugmask[Op_RegL]->SUBTRACT(save_mask);
683   idealreg2mhdebugmask[Op_RegF]->SUBTRACT(save_mask);
684   idealreg2mhdebugmask[Op_RegD]->SUBTRACT(save_mask);
685   idealreg2mhdebugmask[Op_RegP]->SUBTRACT(save_mask);
686   idealreg2mhdebugmask[Op_VecS]->SUBTRACT(save_mask);
687   idealreg2mhdebugmask[Op_VecD]->SUBTRACT(save_mask);
688   idealreg2mhdebugmask[Op_VecX]->SUBTRACT(save_mask);
689   idealreg2mhdebugmask[Op_VecY]->SUBTRACT(save_mask);
690   idealreg2mhdebugmask[Op_VecZ]->SUBTRACT(save_mask);
691 }
692
693 //---------------------------is_save_on_entry----------------------------------
// True when 'reg' must be preserved across the compiled body: the platform
// policy marks it save-on-entry ('E') or always-save ('A'), or we are
// compiling an argument-saving trampoline stub and 'reg' holds an argument.
694 bool Matcher::is_save_on_entry( int reg ) {
695   return
696     _register_save_policy[reg] == 'E' ||
697     _register_save_policy[reg] == 'A' || // Save-on-entry register?
698     // Also save argument registers in the trampolining stubs
699     (C->save_argument_registers() && is_spillable_arg(reg));
700 }
701
702 //---------------------------Fixup_Save_On_Entry-------------------------------
703 void Matcher::Fixup_Save_On_Entry( ) {
704 init_first_stack_mask();
705
706 Node *root = C->root(); // Short name for root
707 // Count number of save-on-entry registers.
708 uint soe_cnt = number_of_saved_registers();
709 uint i;
710
1963
1964 //------------------------------find_receiver----------------------------------
1965 // For a given signature, return the OptoReg for parameter 0.
1966 OptoReg::Name Matcher::find_receiver( bool is_outgoing ) {
1967 VMRegPair regs;
1968 BasicType sig_bt = T_OBJECT;
1969 calling_convention(&sig_bt, ®s, 1, is_outgoing);
1970 // Return argument 0 register. In the LP64 build pointers
1971 // take 2 registers, but the VM wants only the 'main' name.
1972 return OptoReg::as_OptoReg(regs.first());
1973 }
1974
// True when (n, m) forms a vector-shift-by-constant pattern: n is a vector
// shift node and m is its shift-count node whose first input is a constant.
// NULL for either operand yields false.
1975 bool Matcher::is_vshift_con_pattern(Node *n, Node *m) {
1976   if (n != NULL && m != NULL) {
1977     return VectorNode::is_vector_shift(n) &&
1978            VectorNode::is_vector_shift_count(m) && m->in(1)->is_Con();
1979   }
1980   return false;
1981 }
1982
// Decide whether matching should clone producer 'm' under consumer 'n':
// returns true (and pushes m for a visit) when the opcode is in the
// mandatory-clone table, otherwise defers to the platform-dependent policy.
1983 bool Matcher::clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
1984   // Must clone all producers of flags, or we will not match correctly.
1985   // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
1986   // then it will match into an ideal Op_RegFlags. Alas, the fp-flags
1987   // are also there, so we may match a float-branch to int-flags and
1988   // expect the allocator to haul the flags from the int-side to the
1989   // fp-side. No can do.
1990   if (_must_clone[m->Opcode()]) {
1991     mstack.push(m, Visit);
1992     return true;
1993   }
1994   return pd_clone_node(n, m, mstack);
1995 }
1996
1997 bool Matcher::clone_base_plus_offset_address(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
1998 Node *off = m->in(AddPNode::Offset);
1999 if (off->is_Con()) {
2000 address_visited.test_set(m->_idx); // Flag as address_visited
2001 mstack.push(m->in(AddPNode::Address), Pre_Visit);
2002 // Clone X+offset as it also folds into most addressing expressions
2300 case Op_StrCompressedCopy:
2301 case Op_StrInflatedCopy:
2302 case Op_EncodeISOArray: {
2303 // Restructure into a binary tree for Matching.
2304 Node* pair = new BinaryNode(n->in(3), n->in(4));
2305 n->set_req(3, pair);
2306 n->del_req(4);
2307 break;
2308 }
2309 case Op_FmaD:
2310 case Op_FmaF:
2311 case Op_FmaVD:
2312 case Op_FmaVF: {
2313 // Restructure into a binary tree for Matching.
2314 Node* pair = new BinaryNode(n->in(1), n->in(2));
2315 n->set_req(2, pair);
2316 n->set_req(1, n->in(3));
2317 n->del_req(3);
2318 break;
2319 }
2320 case Op_VectorBlend:
2321 case Op_VectorInsert: {
2322 Node* pair = new BinaryNode(n->in(1), n->in(2));
2323 n->set_req(1, pair);
2324 n->set_req(2, n->in(3));
2325 n->del_req(3);
2326 break;
2327 }
2328 case Op_StoreVectorScatter: {
2329 Node* pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2330 n->set_req(MemNode::ValueIn, pair);
2331 n->del_req(MemNode::ValueIn+1);
2332 break;
2333 }
2334 case Op_MulAddS2I: {
2335 Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2336 Node* pair2 = new BinaryNode(n->in(3), n->in(4));
2337 n->set_req(1, pair1);
2338 n->set_req(2, pair2);
2339 n->del_req(4);
2340 n->del_req(3);
2341 break;
2342 }
2343 case Op_VectorMaskCmp: {
2344 n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2345 n->set_req(2, n->in(3));
2346 n->del_req(3);
2347 break;
2348 }
2349 default:
2350 break;
2351 }
2352 }
2353
2354 #ifdef ASSERT
2355 // machine-independent root to machine-dependent root
// Debug-only dump of the ideal-node -> mach-node mapping built by matching.
2356 void Matcher::dump_old2new_map() {
2357   _old2new_map.dump();
2358 }
2359 #endif
2360
2361 //---------------------------collect_null_checks-------------------------------
2362 // Find null checks in the ideal graph; write a machine-specific node for
2363 // it. Used by later implicit-null-check handling. Actually collects
2364 // either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
2365 // value being tested.
2366 void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
2367 Node *iff = proj->in(0);
2368 if( iff->Opcode() == Op_If ) {
|