
src/share/vm/c1/c1_LinearScan.cpp

rev 9032 : 8138952: C1: Distinguish between PPC32 and PPC64
Reviewed-by:


2070 #ifdef __SOFTFP__
2071       case T_DOUBLE:  // fall through
2072 #endif // __SOFTFP__
2073       case T_LONG: {
2074         int assigned_regHi = interval->assigned_regHi();
2075         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2076         assert(num_physical_regs(T_LONG) == 1 ||
2077                (assigned_regHi >= pd_first_cpu_reg && assigned_regHi <= pd_last_cpu_reg), "no cpu register");
2078 
2079         assert(assigned_reg != assigned_regHi, "invalid allocation");
2080         assert(num_physical_regs(T_LONG) == 1 || assigned_reg < assigned_regHi,
2081                "register numbers must be sorted (ensure that e.g. a move from eax,ebx to ebx,eax can not occur)");
2082         assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must be match");
2083         if (requires_adjacent_regs(T_LONG)) {
2084           assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
2085         }
2086 
2087 #ifdef _LP64
2088         return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
2089 #else
2090 #if defined(SPARC) || defined(PPC)
2091         return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
2092 #else
2093         return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
2094 #endif // SPARC || PPC
2095 #endif // LP64
2096       }
2097 
2098 #ifndef __SOFTFP__
2099       case T_FLOAT: {
2100 #ifdef X86
2101         if (UseSSE >= 1) {
2102           int last_xmm_reg = pd_last_xmm_reg;
2103 #ifdef _LP64
2104           if (UseAVX < 3) {
2105             last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
2106           }
2107 #endif
2108           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= last_xmm_reg, "no xmm register");
2109           assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2110           return LIR_OprFact::single_xmm(assigned_reg - pd_first_xmm_reg);


2711       // the low-order word of the double and fpu_regnrLo + 1 is the
2712       // name for the other half.  *first and *second must represent the
2713       // least and most significant words, respectively.
2714 
2715 #ifdef X86
2716       // the exact location of fpu stack values is only known
2717       // during fpu stack allocation, so the stack allocator object
2718       // must be present
2719       assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
2720       assert(_fpu_stack_allocator != NULL, "must be present");
2721       opr = _fpu_stack_allocator->to_fpu_stack(opr);
2722 
2723       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrLo is used)");
2724 #endif
2725 #ifdef SPARC
2726       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
2727 #endif
2728 #ifdef ARM32
2729       assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)");
2730 #endif
2731 #ifdef PPC
2732       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
2733 #endif
2734 
2735 #ifdef VM_LITTLE_ENDIAN
2736       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrLo());
2737 #else
2738       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
2739 #endif
2740 
2741 #ifdef _LP64
2742       first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
2743       second = _int_0_scope_value;
2744 #else
2745       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
2746       // %%% This is probably a waste but we'll keep things as they were for now
2747       if (true) {
2748         VMReg rname_second = rname_first->next();
2749         second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
2750       }
2751 #endif
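In the hunk above, which half names the FPU register handed to frame_map()->fpu_regname() depends on byte order (fpu_regnrLo on VM_LITTLE_ENDIAN, fpu_regnrHi otherwise), because *first must describe the least-significant word. Below is a standalone sketch, not HotSpot code, showing which in-memory 32-bit word of a double is the least-significant half on the host.

// Standalone illustration (not HotSpot code): for a double viewed as two
// 32-bit words, which word comes "first" in memory depends on byte order,
// mirroring why the code above picks fpu_regnrLo() on little-endian VMs
// and fpu_regnrHi() otherwise.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double d = 1.0;                 // bit pattern 0x3FF0000000000000
  uint64_t bits;
  uint32_t words[2];
  std::memcpy(&bits, &d, sizeof bits);
  std::memcpy(words, &d, sizeof words);

  uint32_t lsw = static_cast<uint32_t>(bits);        // least-significant word
  uint32_t msw = static_cast<uint32_t>(bits >> 32);  // most-significant word

  if (words[0] == lsw) {
    std::printf("little-endian host: word 0 is the least-significant half (0x%08x)\n", (unsigned) lsw);
  } else if (words[0] == msw) {
    std::printf("big-endian host: word 0 is the most-significant half (0x%08x)\n", (unsigned) msw);
  }
  return 0;
}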




2070 #ifdef __SOFTFP__
2071       case T_DOUBLE:  // fall through
2072 #endif // __SOFTFP__
2073       case T_LONG: {
2074         int assigned_regHi = interval->assigned_regHi();
2075         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2076         assert(num_physical_regs(T_LONG) == 1 ||
2077                (assigned_regHi >= pd_first_cpu_reg && assigned_regHi <= pd_last_cpu_reg), "no cpu register");
2078 
2079         assert(assigned_reg != assigned_regHi, "invalid allocation");
2080         assert(num_physical_regs(T_LONG) == 1 || assigned_reg < assigned_regHi,
2081                "register numbers must be sorted (ensure that e.g. a move from eax,ebx to ebx,eax can not occur)");
2082         assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must be match");
2083         if (requires_adjacent_regs(T_LONG)) {
2084           assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
2085         }
2086 
2087 #ifdef _LP64
2088         return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
2089 #else
2090 #if defined(SPARC) || defined(PPC32)
2091         return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
2092 #else
2093         return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
2094 #endif // SPARC || PPC32
2095 #endif // LP64
2096       }
2097 
2098 #ifndef __SOFTFP__
2099       case T_FLOAT: {
2100 #ifdef X86
2101         if (UseSSE >= 1) {
2102           int last_xmm_reg = pd_last_xmm_reg;
2103 #ifdef _LP64
2104           if (UseAVX < 3) {
2105             last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
2106           }
2107 #endif
2108           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= last_xmm_reg, "no xmm register");
2109           assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2110           return LIR_OprFact::single_xmm(assigned_reg - pd_first_xmm_reg);


2711       // the low-order word of the double and fpu_regnrLo + 1 is the
2712       // name for the other half.  *first and *second must represent the
2713       // least and most significant words, respectively.
2714 
2715 #ifdef X86
2716       // the exact location of fpu stack values is only known
2717       // during fpu stack allocation, so the stack allocator object
2718       // must be present
2719       assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
2720       assert(_fpu_stack_allocator != NULL, "must be present");
2721       opr = _fpu_stack_allocator->to_fpu_stack(opr);
2722 
2723       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrLo is used)");
2724 #endif
2725 #ifdef SPARC
2726       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
2727 #endif
2728 #ifdef ARM32
2729       assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)");
2730 #endif
2731 #ifdef PPC32
2732       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
2733 #endif
2734 
2735 #ifdef VM_LITTLE_ENDIAN
2736       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrLo());
2737 #else
2738       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
2739 #endif
2740 
2741 #ifdef _LP64
2742       first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
2743       second = _int_0_scope_value;
2744 #else
2745       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
2746       // %%% This is probably a waste but we'll keep things as they were for now
2747       if (true) {
2748         VMReg rname_second = rname_first->next();
2749         second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
2750       }
2751 #endif
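For the debug-info side of the patched hunk: on LP64 the whole double fits in one register, so first is a Location::dbl and second is just the int-0 filler, while 32-bit VMs describe the value as two 'normal' locations, the second register being rname_first->next(). The sketch below only illustrates the shape of that pair; it is not the real Location/LocationValue API and all names are hypothetical.

// Standalone sketch (not the HotSpot Location API): how the (first, second)
// pair above differs between 64-bit and 32-bit VMs for a double held in an
// FPU register. Names and types are illustrative only.
#include <cstdio>
#include <string>
#include <utility>

struct ScopeSlot {
  std::string kind;  // "dbl", "normal", or "int 0 filler"
  int reg;           // register number, -1 when unused
};

static std::pair<ScopeSlot, ScopeSlot> describe_double(int rname_first, bool lp64) {
  if (lp64) {
    // One 64-bit register holds the whole double; the second slot is padding.
    return { ScopeSlot{"dbl", rname_first}, ScopeSlot{"int 0 filler", -1} };
  }
  // Two consecutive 32-bit registers each hold one half of the double.
  int rname_second = rname_first + 1;  // stands in for rname_first->next()
  return { ScopeSlot{"normal", rname_first}, ScopeSlot{"normal", rname_second} };
}

int main() {
  auto p64 = describe_double(10, true);
  auto p32 = describe_double(10, false);
  std::printf("LP64 : first=%s(r%d) second=%s\n",
              p64.first.kind.c_str(), p64.first.reg, p64.second.kind.c_str());
  std::printf("ILP32: first=%s(r%d) second=%s(r%d)\n",
              p32.first.kind.c_str(), p32.first.reg,
              p32.second.kind.c_str(), p32.second.reg);
  return 0;
}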

