src/cpu/x86/vm/c1_FrameMap_x86.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File
*** old/src/cpu/x86/vm/c1_FrameMap_x86.cpp	Thu Nov 25 07:05:57 2010
--- new/src/cpu/x86/vm/c1_FrameMap_x86.cpp	Thu Nov 25 07:05:56 2010

*** 156,168 **** --- 156,170 ----
    map_register(7, rbp);
  #else
    map_register( 6, r8);    r8_opr = LIR_OprFact::single_cpu(6);
    map_register( 7, r9);    r9_opr = LIR_OprFact::single_cpu(7);
    map_register( 8, r11);  r11_opr = LIR_OprFact::single_cpu(8);
!   map_register( 9, r12);  r12_opr = LIR_OprFact::single_cpu(9);
!   map_register(10, r13);  r13_opr = LIR_OprFact::single_cpu(10);
    map_register(11, r14);  r14_opr = LIR_OprFact::single_cpu(11);
!   map_register( 9, r13);  r13_opr = LIR_OprFact::single_cpu(9);
!   map_register(10, r14);  r14_opr = LIR_OprFact::single_cpu(10);
+   // r12 is allocated conditionally. With compressed oops it holds
+   // the heapbase value and is not visible to the allocator.
+   map_register(11, r12);  r12_opr = LIR_OprFact::single_cpu(11);
    // The unallocatable registers are at the end
    map_register(12, r10);  r10_opr = LIR_OprFact::single_cpu(12);
    map_register(13, r15);  r15_opr = LIR_OprFact::single_cpu(13);
    map_register(14, rsp);
    map_register(15, rbp);
*** 189,201 **** --- 191,203 ----
  #ifdef _LP64
    _caller_save_cpu_regs[6]  = r8_opr;
    _caller_save_cpu_regs[7]  = r9_opr;
    _caller_save_cpu_regs[8]  = r11_opr;
!   _caller_save_cpu_regs[9]  = r12_opr;
!   _caller_save_cpu_regs[10] = r13_opr;
!   _caller_save_cpu_regs[11] = r14_opr;
!   _caller_save_cpu_regs[9]  = r13_opr;
!   _caller_save_cpu_regs[10] = r14_opr;
!   _caller_save_cpu_regs[11] = r12_opr;
  #endif // _LP64
    _xmm_regs[0] = xmm0;
    _xmm_regs[1] = xmm1;

src/cpu/x86/vm/c1_FrameMap_x86.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File