--- old/src/cpu/x86/vm/assembler_x86.cpp 2014-04-24 15:52:55.000000000 -1000 +++ new/src/cpu/x86/vm/assembler_x86.cpp 2014-04-24 15:52:55.000000000 -1000 @@ -104,11 +104,9 @@ } // exceedingly dangerous constructor -Address::Address(int disp, address loc, relocInfo::relocType rtype) { - _base = noreg; - _index = noreg; - _scale = no_scale; - _disp = disp; +Address::Address(int disp, address loc, relocInfo::relocType rtype) : + _scale(no_scale), + _disp(disp) { switch (rtype) { case relocInfo::external_word_type: _rspec = external_word_Relocation::spec(loc); @@ -162,7 +160,7 @@ if (disp_reloc != relocInfo::none) { rspec = Relocation::spec_simple(disp_reloc); } - bool valid_index = index != rsp->encoding(); + bool valid_index = index != rsp.encoding(); if (valid_index) { Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp)); madr._rspec = rspec; @@ -206,8 +204,8 @@ emit_int32(data); } -static int encode(Register r) { - int enc = r->encoding(); +static int encode(AbstractRegister r) { + int enc = r.encoding(); if (enc >= 8) { enc -= 8; } @@ -215,7 +213,7 @@ } void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) { - assert(dst->has_byte_register(), "must have byte register"); + assert(dst.has_byte_register(), "must have byte register"); assert(isByte(op1) && isByte(op2), "wrong opcode"); assert(isByte(imm8), "not a byte"); assert((op1 & 0x01) == 0, "should be 8bit operation"); @@ -273,7 +271,7 @@ } -void Assembler::emit_operand(Register reg, Register base, Register index, +void Assembler::emit_operand(AbstractRegister reg, Register base, Register index, Address::ScaleFactor scale, int disp, RelocationHolder const& rspec, int rip_relative_correction) { @@ -282,11 +280,11 @@ // Encode the registers as needed in the fields they are used in int regenc = encode(reg) << 3; - int indexenc = index->is_valid() ? encode(index) << 3 : 0; - int baseenc = base->is_valid() ? encode(base) : 0; + int indexenc = index.is_valid() ? encode(index) << 3 : 0; + int baseenc = base.is_valid() ? 
encode(base) : 0; - if (base->is_valid()) { - if (index->is_valid()) { + if (base.is_valid()) { + if (index.is_valid()) { assert(scale != Address::no_scale, "inconsistent address"); // [base + index*scale + disp] if (disp == 0 && rtype == relocInfo::none && @@ -352,7 +350,7 @@ } } } else { - if (index->is_valid()) { + if (index.is_valid()) { assert(scale != Address::no_scale, "inconsistent address"); // [index*scale + disp] // [00 reg 100][ss index 101] disp32 @@ -391,12 +389,6 @@ } } -void Assembler::emit_operand(XMMRegister reg, Register base, Register index, - Address::ScaleFactor scale, int disp, - RelocationHolder const& rspec) { - emit_operand((Register)reg, base, index, scale, disp, rspec); -} - // Secret local extension to Assembler::WhichOperand: #define end_pc_operand (_WhichOperand_limit) @@ -842,7 +834,7 @@ #endif // ASSERT void Assembler::emit_operand32(Register reg, Address adr) { - assert(reg->encoding() < 8, "no extended registers"); + assert(reg.encoding() < 8, "no extended registers"); assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers"); emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); @@ -863,13 +855,13 @@ // MMX operations void Assembler::emit_operand(MMXRegister reg, Address adr) { assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers"); - emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); + emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); } // work around gcc (3.2.1-7a) bug void Assembler::emit_operand(Address adr, MMXRegister reg) { assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers"); - emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); + emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); } @@ -909,7 +901,7 @@ } void Assembler::adcl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); + (void) prefix_and_encode(dst.encoding(), src.encoding()); emit_arith(0x13, 0xC0, dst, src); } @@ -939,7 +931,7 @@ } void Assembler::addl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); + (void) prefix_and_encode(dst.encoding(), src.encoding()); emit_arith(0x03, 0xC0, dst, src); } @@ -1085,7 +1077,7 @@ } void Assembler::andl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); + (void) prefix_and_encode(dst.encoding(), src.encoding()); emit_arith(0x23, 0xC0, dst, src); } @@ -1105,21 +1097,21 @@ } void Assembler::bsfl(Register dst, Register src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); + int encode = prefix_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xBC); emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::bsrl(Register dst, Register src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); + int encode = prefix_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xBD); emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::bswapl(Register reg) { // bswap - int encode = prefix_and_encode(reg->encoding()); + int encode = prefix_and_encode(reg.encoding()); emit_int8(0x0F); emit_int8((unsigned char)(0xC8 | encode)); } @@ -1192,7 +1184,7 @@ } void Assembler::call(Register dst) { - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); emit_int8((unsigned 
char)0xFF); emit_int8((unsigned char)(0xD0 | encode)); } @@ -1228,7 +1220,7 @@ void Assembler::cmovl(Condition cc, Register dst, Register src) { NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); + int encode = prefix_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8(0x40 | cc); emit_int8((unsigned char)(0xC0 | encode)); @@ -1265,7 +1257,7 @@ } void Assembler::cmpl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); + (void) prefix_and_encode(dst.encoding(), src.encoding()); emit_arith(0x3B, 0xC0, dst, src); } @@ -1432,19 +1424,19 @@ } void Assembler::idivl(Register src) { - int encode = prefix_and_encode(src->encoding()); + int encode = prefix_and_encode(src.encoding()); emit_int8((unsigned char)0xF7); emit_int8((unsigned char)(0xF8 | encode)); } void Assembler::divl(Register src) { // Unsigned - int encode = prefix_and_encode(src->encoding()); + int encode = prefix_and_encode(src.encoding()); emit_int8((unsigned char)0xF7); emit_int8((unsigned char)(0xF0 | encode)); } void Assembler::imull(Register dst, Register src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); + int encode = prefix_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xAF); emit_int8((unsigned char)(0xC0 | encode)); @@ -1452,7 +1444,7 @@ void Assembler::imull(Register dst, Register src, int value) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); + int encode = prefix_and_encode(dst.encoding(), src.encoding()); if (is8bit(value)) { emit_int8(0x6B); emit_int8((unsigned char)(0xC0 | encode)); @@ -1574,7 +1566,7 @@ } void Assembler::jmp(Register entry) { - int encode = prefix_and_encode(entry->encoding()); + int encode = prefix_and_encode(entry.encoding()); emit_int8((unsigned char)0xFF); emit_int8((unsigned char)(0xE0 | encode)); } @@ -1644,7 +1636,7 @@ void Assembler::lzcntl(Register dst, Register src) { assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); emit_int8((unsigned char)0xF3); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); + int encode = prefix_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xBD); emit_int8((unsigned char)(0xC0 | encode)); @@ -1680,7 +1672,7 @@ } void Assembler::movb(Register dst, Address src) { - NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); + NOT_LP64(assert(dst.has_byte_register(), "must have byte register")); InstructionMark im(this); prefix(src, dst, true); emit_int8((unsigned char)0x8A); @@ -1698,7 +1690,7 @@ void Assembler::movb(Address dst, Register src) { - assert(src->has_byte_register(), "must have byte register"); + assert(src.has_byte_register(), "must have byte register"); InstructionMark im(this); prefix(dst, src, true); emit_int8((unsigned char)0x88); @@ -1796,13 +1788,13 @@ // Uses zero extension on 64bit void Assembler::movl(Register dst, int32_t imm32) { - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); emit_int8((unsigned char)(0xB8 | encode)); emit_int32(imm32); } void Assembler::movl(Register dst, Register src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); + int encode = prefix_and_encode(dst.encoding(), src.encoding()); emit_int8((unsigned char)0x8B); emit_int8((unsigned char)(0xC0 | encode)); } @@ -1882,8 +1874,8 @@ } void Assembler::movsbl(Register dst, Register src) { // 
movsxb - NOT_LP64(assert(src->has_byte_register(), "must have byte register")); - int encode = prefix_and_encode(dst->encoding(), src->encoding(), true); + NOT_LP64(assert(src.has_byte_register(), "must have byte register")); + int encode = prefix_and_encode(dst.encoding(), src.encoding(), true); emit_int8(0x0F); emit_int8((unsigned char)0xBE); emit_int8((unsigned char)(0xC0 | encode)); @@ -1934,7 +1926,7 @@ } void Assembler::movswl(Register dst, Register src) { // movsxw - int encode = prefix_and_encode(dst->encoding(), src->encoding()); + int encode = prefix_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xBF); emit_int8((unsigned char)(0xC0 | encode)); @@ -1975,8 +1967,8 @@ } void Assembler::movzbl(Register dst, Register src) { // movzxb - NOT_LP64(assert(src->has_byte_register(), "must have byte register")); - int encode = prefix_and_encode(dst->encoding(), src->encoding(), true); + NOT_LP64(assert(src.has_byte_register(), "must have byte register")); + int encode = prefix_and_encode(dst.encoding(), src.encoding(), true); emit_int8(0x0F); emit_int8((unsigned char)0xB6); emit_int8(0xC0 | encode); @@ -1991,7 +1983,7 @@ } void Assembler::movzwl(Register dst, Register src) { // movzxw - int encode = prefix_and_encode(dst->encoding(), src->encoding()); + int encode = prefix_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xB7); emit_int8(0xC0 | encode); @@ -2005,7 +1997,7 @@ } void Assembler::mull(Register src) { - int encode = prefix_and_encode(src->encoding()); + int encode = prefix_and_encode(src.encoding()); emit_int8((unsigned char)0xF7); emit_int8((unsigned char)(0xE0 | encode)); } @@ -2031,7 +2023,7 @@ } void Assembler::negl(Register dst) { - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); emit_int8((unsigned char)0xF7); emit_int8((unsigned char)(0xD8 | encode)); } @@ -2290,7 +2282,7 @@ } void Assembler::notl(Register dst) { - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); emit_int8((unsigned char)0xF7); emit_int8((unsigned char)(0xD0 | encode)); } @@ -2314,7 +2306,7 @@ } void Assembler::orl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); + (void) prefix_and_encode(dst.encoding(), src.encoding()); emit_arith(0x0B, 0xC0, dst, src); } @@ -2366,7 +2358,7 @@ void Assembler::pextrd(Register dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); - int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, false); + int encode = simd_prefix_and_encode(as_XMMRegister(dst.encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, false); emit_int8(0x16); emit_int8((unsigned char)(0xC0 | encode)); emit_int8(imm8); @@ -2374,7 +2366,7 @@ void Assembler::pextrq(Register dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); - int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true); + int encode = simd_prefix_and_encode(as_XMMRegister(dst.encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true); emit_int8(0x16); emit_int8((unsigned char)(0xC0 | encode)); emit_int8(imm8); @@ -2382,7 +2374,7 @@ void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); - int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, 
false); + int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src.encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, false); emit_int8(0x22); emit_int8((unsigned char)(0xC0 | encode)); emit_int8(imm8); @@ -2390,7 +2382,7 @@ void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); - int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, true); + int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src.encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, true); emit_int8(0x22); emit_int8((unsigned char)(0xC0 | encode)); emit_int8(imm8); @@ -2413,7 +2405,7 @@ // generic void Assembler::pop(Register dst) { - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); emit_int8(0x58 | encode); } @@ -2430,7 +2422,7 @@ void Assembler::popcntl(Register dst, Register src) { assert(VM_Version::supports_popcnt(), "must support"); emit_int8((unsigned char)0xF3); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); + int encode = prefix_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xB8); emit_int8((unsigned char)(0xC0 | encode)); @@ -2589,7 +2581,7 @@ InstructionMark im(this); bool vector256 = true; assert(dst != xnoreg, "sanity"); - int dst_enc = dst->encoding(); + int dst_enc = dst.encoding(); // swap src<->dst for encoding vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256); emit_int8(0x17); @@ -2639,7 +2631,7 @@ } void Assembler::push(Register src) { - int encode = prefix_and_encode(src->encoding()); + int encode = prefix_and_encode(src.encoding()); emit_int8(0x50 | encode); } @@ -2660,7 +2652,7 @@ void Assembler::rcll(Register dst, int imm8) { assert(isShiftCount(imm8), "illegal shift count"); - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); if (imm8 == 1) { emit_int8((unsigned char)0xD1); emit_int8((unsigned char)(0xD0 | encode)); @@ -2737,7 +2729,7 @@ } void Assembler::sarl(Register dst, int imm8) { - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); assert(isShiftCount(imm8), "illegal shift count"); if (imm8 == 1) { emit_int8((unsigned char)0xD1); @@ -2750,7 +2742,7 @@ } void Assembler::sarl(Register dst) { - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); emit_int8((unsigned char)0xD3); emit_int8((unsigned char)(0xF8 | encode)); } @@ -2775,13 +2767,13 @@ } void Assembler::sbbl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); + (void) prefix_and_encode(dst.encoding(), src.encoding()); emit_arith(0x1B, 0xC0, dst, src); } void Assembler::setb(Condition cc, Register dst) { assert(0 <= cc && cc < 16, "illegal cc"); - int encode = prefix_and_encode(dst->encoding(), true); + int encode = prefix_and_encode(dst.encoding(), true); emit_int8(0x0F); emit_int8((unsigned char)0x90 | cc); emit_int8((unsigned char)(0xC0 | encode)); @@ -2789,7 +2781,7 @@ void Assembler::shll(Register dst, int imm8) { assert(isShiftCount(imm8), "illegal shift count"); - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); if (imm8 == 1 ) { emit_int8((unsigned char)0xD1); emit_int8((unsigned char)(0xE0 | encode)); @@ -2801,21 +2793,21 @@ } void Assembler::shll(Register dst) { - int encode = prefix_and_encode(dst->encoding()); + int encode = 
prefix_and_encode(dst.encoding()); emit_int8((unsigned char)0xD3); emit_int8((unsigned char)(0xE0 | encode)); } void Assembler::shrl(Register dst, int imm8) { assert(isShiftCount(imm8), "illegal shift count"); - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); emit_int8((unsigned char)0xC1); emit_int8((unsigned char)(0xE8 | encode)); emit_int8(imm8); } void Assembler::shrl(Register dst) { - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); emit_int8((unsigned char)0xD3); emit_int8((unsigned char)(0xE8 | encode)); } @@ -2890,7 +2882,7 @@ } void Assembler::subl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); + (void) prefix_and_encode(dst.encoding(), src.encoding()); emit_arith(0x2B, 0xC0, dst, src); } @@ -2915,8 +2907,8 @@ } void Assembler::testb(Register dst, int imm8) { - NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); - (void) prefix_and_encode(dst->encoding(), true); + NOT_LP64(assert(dst.has_byte_register(), "must have byte register")); + (void) prefix_and_encode(dst.encoding(), true); emit_arith_b(0xF6, 0xC0, dst, imm8); } @@ -2924,7 +2916,7 @@ // not using emit_arith because test // doesn't support sign-extension of // 8bit operands - int encode = dst->encoding(); + int encode = dst.encoding(); if (encode == 0) { emit_int8((unsigned char)0xA9); } else { @@ -2936,7 +2928,7 @@ } void Assembler::testl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); + (void) prefix_and_encode(dst.encoding(), src.encoding()); emit_arith(0x85, 0xC0, dst, src); } @@ -2950,7 +2942,7 @@ void Assembler::tzcntl(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); emit_int8((unsigned char)0xF3); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); + int encode = prefix_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xBC); emit_int8((unsigned char)0xC0 | encode); @@ -2959,7 +2951,7 @@ void Assembler::tzcntq(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); emit_int8((unsigned char)0xF3); - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xBC); emit_int8((unsigned char)(0xC0 | encode)); @@ -3025,7 +3017,7 @@ } void Assembler::xchgl(Register dst, Register src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); + int encode = prefix_and_encode(dst.encoding(), src.encoding()); emit_int8((unsigned char)0x87); emit_int8((unsigned char)(0xC0 | encode)); } @@ -3055,7 +3047,7 @@ } void Assembler::xorl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); + (void) prefix_and_encode(dst.encoding(), src.encoding()); emit_arith(0x33, 0xC0, dst, src); } @@ -3500,8 +3492,8 @@ void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); InstructionMark im(this); - int dst_enc = dst->encoding(); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; + int dst_enc = dst.encoding(); + int nds_enc = nds.is_valid() ? 
nds.encoding() : 0; vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256); emit_int8(0x40); emit_operand(dst, src); @@ -3784,7 +3776,7 @@ InstructionMark im(this); bool vector256 = true; assert(dst != xnoreg, "sanity"); - int dst_enc = dst->encoding(); + int dst_enc = dst.encoding(); // swap src<->dst for encoding vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256); emit_int8(0x18); @@ -3798,7 +3790,7 @@ InstructionMark im(this); bool vector256 = true; assert(src != xnoreg, "sanity"); - int src_enc = src->encoding(); + int src_enc = src.encoding(); vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256); emit_int8(0x19); emit_operand(src, dst); @@ -3822,7 +3814,7 @@ InstructionMark im(this); bool vector256 = true; assert(dst != xnoreg, "sanity"); - int dst_enc = dst->encoding(); + int dst_enc = dst.encoding(); // swap src<->dst for encoding vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256); emit_int8(0x38); @@ -3836,7 +3828,7 @@ InstructionMark im(this); bool vector256 = true; assert(src != xnoreg, "sanity"); - int src_enc = src->encoding(); + int src_enc = src.encoding(); vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256); emit_int8(0x39); emit_operand(src, dst); @@ -3877,7 +3869,7 @@ // NO PREFIX AS NEVER 64BIT InstructionMark im(this); emit_int8((unsigned char)0x81); - emit_int8((unsigned char)(0xF8 | src1->encoding())); + emit_int8((unsigned char)(0xF8 | src1.encoding())); emit_data(imm32, rspec, 0); } @@ -3901,7 +3893,7 @@ void Assembler::decl(Register dst) { // Don't use it directly. Use MacroAssembler::decrementl() instead. - emit_int8(0x48 | dst->encoding()); + emit_int8(0x48 | dst.encoding()); } #endif // _LP64 @@ -4424,8 +4416,8 @@ void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) { if (UseAVX > 0) { - int xreg_enc = xreg->encoding(); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; + int xreg_enc = xreg.encoding(); + int nds_enc = nds.is_valid() ? nds.encoding() : 0; vex_prefix(adr, nds_enc, xreg_enc, pre, opc, rex_w, vector256); } else { assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding"); @@ -4434,10 +4426,10 @@ } int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) { - int dst_enc = dst->encoding(); - int src_enc = src->encoding(); + int dst_enc = dst.encoding(); + int src_enc = src.encoding(); if (UseAVX > 0) { - int nds_enc = nds->is_valid() ? nds->encoding() : 0; + int nds_enc = nds.is_valid() ? nds.encoding() : 0; return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector256); } else { assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding"); @@ -4492,7 +4484,7 @@ void Assembler::incl(Register dst) { // Don't use it directly. Use MacroAssembler::incrementl() instead. 
- emit_int8(0x40 | dst->encoding()); + emit_int8(0x40 | dst.encoding()); } void Assembler::lea(Register dst, Address src) { @@ -4508,7 +4500,7 @@ void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) { InstructionMark im(this); - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); emit_int8((unsigned char)(0xB8 | encode)); emit_data((int)imm32, rspec, 0); } @@ -4530,25 +4522,25 @@ void Assembler::set_byte_if_not_zero(Register dst) { emit_int8(0x0F); emit_int8((unsigned char)0x95); - emit_int8((unsigned char)(0xE0 | dst->encoding())); + emit_int8((unsigned char)(0xE0 | dst.encoding())); } void Assembler::shldl(Register dst, Register src) { emit_int8(0x0F); emit_int8((unsigned char)0xA5); - emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding())); + emit_int8((unsigned char)(0xC0 | src.encoding() << 3 | dst.encoding())); } void Assembler::shrdl(Register dst, Register src) { emit_int8(0x0F); emit_int8((unsigned char)0xAD); - emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding())); + emit_int8((unsigned char)(0xC0 | src.encoding() << 3 | dst.encoding())); } #else // LP64 void Assembler::set_byte_if_not_zero(Register dst) { - int enc = prefix_and_encode(dst->encoding(), true); + int enc = prefix_and_encode(dst.encoding(), true); emit_int8(0x0F); emit_int8((unsigned char)0x95); emit_int8((unsigned char)(0xE0 | enc)); @@ -4719,7 +4711,7 @@ } void Assembler::prefix(Register reg) { - if (reg->encoding() >= 8) { + if (reg.encoding() >= 8) { prefix(REX_B); } } @@ -4756,7 +4748,7 @@ void Assembler::prefix(Address adr, Register reg, bool byteinst) { - if (reg->encoding() < 8) { + if (reg.encoding() < 8) { if (adr.base_needs_rex()) { if (adr.index_needs_rex()) { prefix(REX_XB); @@ -4766,7 +4758,7 @@ } else { if (adr.index_needs_rex()) { prefix(REX_X); - } else if (byteinst && reg->encoding() >= 4 ) { + } else if (byteinst && reg.encoding() >= 4 ) { prefix(REX); } } @@ -4788,7 +4780,7 @@ } void Assembler::prefixq(Address adr, Register src) { - if (src->encoding() < 8) { + if (src.encoding() < 8) { if (adr.base_needs_rex()) { if (adr.index_needs_rex()) { prefix(REX_WXB); @@ -4820,7 +4812,7 @@ } void Assembler::prefix(Address adr, XMMRegister reg) { - if (reg->encoding() < 8) { + if (reg.encoding() < 8) { if (adr.base_needs_rex()) { if (adr.index_needs_rex()) { prefix(REX_XB); @@ -4850,7 +4842,7 @@ } void Assembler::prefixq(Address adr, XMMRegister src) { - if (src->encoding() < 8) { + if (src.encoding() < 8) { if (adr.base_needs_rex()) { if (adr.index_needs_rex()) { prefix(REX_WXB); @@ -4882,7 +4874,7 @@ } void Assembler::adcq(Register dst, int32_t imm32) { - (void) prefixq_and_encode(dst->encoding()); + (void) prefixq_and_encode(dst.encoding()); emit_arith(0x81, 0xD0, dst, imm32); } @@ -4894,7 +4886,7 @@ } void Assembler::adcq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); + (void) prefixq_and_encode(dst.encoding(), src.encoding()); emit_arith(0x13, 0xC0, dst, src); } @@ -4912,7 +4904,7 @@ } void Assembler::addq(Register dst, int32_t imm32) { - (void) prefixq_and_encode(dst->encoding()); + (void) prefixq_and_encode(dst.encoding()); emit_arith(0x81, 0xC0, dst, imm32); } @@ -4924,7 +4916,7 @@ } void Assembler::addq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); + (void) prefixq_and_encode(dst.encoding(), src.encoding()); emit_arith(0x03, 0xC0, dst, src); } @@ -4937,7 +4929,7 @@ } void 
Assembler::andq(Register dst, int32_t imm32) { - (void) prefixq_and_encode(dst->encoding()); + (void) prefixq_and_encode(dst.encoding()); emit_arith(0x81, 0xE0, dst, imm32); } @@ -4949,7 +4941,7 @@ } void Assembler::andq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); + (void) prefixq_and_encode(dst.encoding(), src.encoding()); emit_arith(0x23, 0xC0, dst, src); } @@ -4969,21 +4961,21 @@ } void Assembler::bsfq(Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xBC); emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::bsrq(Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xBD); emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::bswapq(Register reg) { - int encode = prefixq_and_encode(reg->encoding()); + int encode = prefixq_and_encode(reg.encoding()); emit_int8(0x0F); emit_int8((unsigned char)(0xC8 | encode)); } @@ -5046,7 +5038,7 @@ } void Assembler::cmovq(Condition cc, Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8(0x40 | cc); emit_int8((unsigned char)(0xC0 | encode)); @@ -5069,7 +5061,7 @@ } void Assembler::cmpq(Register dst, int32_t imm32) { - (void) prefixq_and_encode(dst->encoding()); + (void) prefixq_and_encode(dst.encoding()); emit_arith(0x81, 0xF8, dst, imm32); } @@ -5081,7 +5073,7 @@ } void Assembler::cmpq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); + (void) prefixq_and_encode(dst.encoding(), src.encoding()); emit_arith(0x3B, 0xC0, dst, src); } @@ -5147,7 +5139,7 @@ void Assembler::decl(Register dst) { // Don't use it directly. Use MacroAssembler::decrementl() instead. // Use two-byte form (one-byte form is a REX prefix in 64-bit mode) - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); emit_int8((unsigned char)0xFF); emit_int8((unsigned char)(0xC8 | encode)); } @@ -5155,7 +5147,7 @@ void Assembler::decq(Register dst) { // Don't use it directly. Use MacroAssembler::decrementq() instead. 
// Use two-byte form (one-byte from is a REX prefix in 64-bit mode) - int encode = prefixq_and_encode(dst->encoding()); + int encode = prefixq_and_encode(dst.encoding()); emit_int8((unsigned char)0xFF); emit_int8(0xC8 | encode); } @@ -5183,20 +5175,20 @@ } void Assembler::idivq(Register src) { - int encode = prefixq_and_encode(src->encoding()); + int encode = prefixq_and_encode(src.encoding()); emit_int8((unsigned char)0xF7); emit_int8((unsigned char)(0xF8 | encode)); } void Assembler::imulq(Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xAF); emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::imulq(Register dst, Register src, int value) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); if (is8bit(value)) { emit_int8(0x6B); emit_int8((unsigned char)(0xC0 | encode)); @@ -5219,7 +5211,7 @@ void Assembler::incl(Register dst) { // Don't use it directly. Use MacroAssembler::incrementl() instead. // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); emit_int8((unsigned char)0xFF); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5227,7 +5219,7 @@ void Assembler::incq(Register dst) { // Don't use it directly. Use MacroAssembler::incrementq() instead. // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) - int encode = prefixq_and_encode(dst->encoding()); + int encode = prefixq_and_encode(dst.encoding()); emit_int8((unsigned char)0xFF); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5253,21 +5245,21 @@ void Assembler::mov64(Register dst, int64_t imm64) { InstructionMark im(this); - int encode = prefixq_and_encode(dst->encoding()); + int encode = prefixq_and_encode(dst.encoding()); emit_int8((unsigned char)(0xB8 | encode)); emit_int64(imm64); } void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) { InstructionMark im(this); - int encode = prefixq_and_encode(dst->encoding()); + int encode = prefixq_and_encode(dst.encoding()); emit_int8(0xB8 | encode); emit_data64(imm64, rspec); } void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) { InstructionMark im(this); - int encode = prefix_and_encode(dst->encoding()); + int encode = prefix_and_encode(dst.encoding()); emit_int8((unsigned char)(0xB8 | encode)); emit_data((int)imm32, rspec, narrow_oop_operand); } @@ -5282,7 +5274,7 @@ void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) { InstructionMark im(this); - int encode = prefix_and_encode(src1->encoding()); + int encode = prefix_and_encode(src1.encoding()); emit_int8((unsigned char)0x81); emit_int8((unsigned char)(0xF8 | encode)); emit_data((int)imm32, rspec, narrow_oop_operand); @@ -5299,7 +5291,7 @@ void Assembler::lzcntq(Register dst, Register src) { assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); emit_int8((unsigned char)0xF3); - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xBD); emit_int8((unsigned char)(0xC0 | encode)); @@ -5323,7 +5315,7 @@ } void Assembler::movq(Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), 
src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); emit_int8((unsigned char)0x8B); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5351,7 +5343,7 @@ } void Assembler::movsbq(Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xBE); emit_int8((unsigned char)(0xC0 | encode)); @@ -5363,7 +5355,7 @@ // as a result we shouldn't use until tested at runtime... ShouldNotReachHere(); InstructionMark im(this); - int encode = prefixq_and_encode(dst->encoding()); + int encode = prefixq_and_encode(dst.encoding()); emit_int8((unsigned char)(0xC7 | encode)); emit_int32(imm32); } @@ -5385,7 +5377,7 @@ } void Assembler::movslq(Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); emit_int8(0x63); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5399,7 +5391,7 @@ } void Assembler::movswq(Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); emit_int8((unsigned char)0x0F); emit_int8((unsigned char)0xBF); emit_int8((unsigned char)(0xC0 | encode)); @@ -5414,7 +5406,7 @@ } void Assembler::movzbq(Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); emit_int8(0x0F); emit_int8((unsigned char)0xB6); emit_int8(0xC0 | encode); @@ -5429,20 +5421,20 @@ } void Assembler::movzwq(Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); emit_int8((unsigned char)0x0F); emit_int8((unsigned char)0xB7); emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::negq(Register dst) { - int encode = prefixq_and_encode(dst->encoding()); + int encode = prefixq_and_encode(dst.encoding()); emit_int8((unsigned char)0xF7); emit_int8((unsigned char)(0xD8 | encode)); } void Assembler::notq(Register dst) { - int encode = prefixq_and_encode(dst->encoding()); + int encode = prefixq_and_encode(dst.encoding()); emit_int8((unsigned char)0xF7); emit_int8((unsigned char)(0xD0 | encode)); } @@ -5456,7 +5448,7 @@ } void Assembler::orq(Register dst, int32_t imm32) { - (void) prefixq_and_encode(dst->encoding()); + (void) prefixq_and_encode(dst.encoding()); emit_arith(0x81, 0xC8, dst, imm32); } @@ -5468,7 +5460,7 @@ } void Assembler::orq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); + (void) prefixq_and_encode(dst.encoding(), src.encoding()); emit_arith(0x0B, 0xC0, dst, src); } @@ -5506,7 +5498,7 @@ void Assembler::popcntq(Register dst, Register src) { assert(VM_Version::supports_popcnt(), "must support"); emit_int8((unsigned char)0xF3); - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); emit_int8((unsigned char)0x0F); emit_int8((unsigned char)0xB8); emit_int8((unsigned char)(0xC0 | encode)); @@ -5553,7 +5545,7 @@ void Assembler::rclq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); - int encode = prefixq_and_encode(dst->encoding()); + int encode = prefixq_and_encode(dst.encoding()); if (imm8 == 1) { emit_int8((unsigned char)0xD1); emit_int8((unsigned 
char)(0xD0 | encode)); @@ -5565,7 +5557,7 @@ } void Assembler::sarq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); - int encode = prefixq_and_encode(dst->encoding()); + int encode = prefixq_and_encode(dst.encoding()); if (imm8 == 1) { emit_int8((unsigned char)0xD1); emit_int8((unsigned char)(0xF8 | encode)); @@ -5577,7 +5569,7 @@ } void Assembler::sarq(Register dst) { - int encode = prefixq_and_encode(dst->encoding()); + int encode = prefixq_and_encode(dst.encoding()); emit_int8((unsigned char)0xD3); emit_int8((unsigned char)(0xF8 | encode)); } @@ -5589,7 +5581,7 @@ } void Assembler::sbbq(Register dst, int32_t imm32) { - (void) prefixq_and_encode(dst->encoding()); + (void) prefixq_and_encode(dst.encoding()); emit_arith(0x81, 0xD8, dst, imm32); } @@ -5601,13 +5593,13 @@ } void Assembler::sbbq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); + (void) prefixq_and_encode(dst.encoding(), src.encoding()); emit_arith(0x1B, 0xC0, dst, src); } void Assembler::shlq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); - int encode = prefixq_and_encode(dst->encoding()); + int encode = prefixq_and_encode(dst.encoding()); if (imm8 == 1) { emit_int8((unsigned char)0xD1); emit_int8((unsigned char)(0xE0 | encode)); @@ -5619,21 +5611,21 @@ } void Assembler::shlq(Register dst) { - int encode = prefixq_and_encode(dst->encoding()); + int encode = prefixq_and_encode(dst.encoding()); emit_int8((unsigned char)0xD3); emit_int8((unsigned char)(0xE0 | encode)); } void Assembler::shrq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); - int encode = prefixq_and_encode(dst->encoding()); + int encode = prefixq_and_encode(dst.encoding()); emit_int8((unsigned char)0xC1); emit_int8((unsigned char)(0xE8 | encode)); emit_int8(imm8); } void Assembler::shrq(Register dst) { - int encode = prefixq_and_encode(dst->encoding()); + int encode = prefixq_and_encode(dst.encoding()); emit_int8((unsigned char)0xD3); emit_int8(0xE8 | encode); } @@ -5652,13 +5644,13 @@ } void Assembler::subq(Register dst, int32_t imm32) { - (void) prefixq_and_encode(dst->encoding()); + (void) prefixq_and_encode(dst.encoding()); emit_arith(0x81, 0xE8, dst, imm32); } // Force generation of a 4 byte immediate value even if it fits into 8bit void Assembler::subq_imm32(Register dst, int32_t imm32) { - (void) prefixq_and_encode(dst->encoding()); + (void) prefixq_and_encode(dst.encoding()); emit_arith_imm32(0x81, 0xE8, dst, imm32); } @@ -5670,7 +5662,7 @@ } void Assembler::subq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); + (void) prefixq_and_encode(dst.encoding(), src.encoding()); emit_arith(0x2B, 0xC0, dst, src); } @@ -5678,7 +5670,7 @@ // not using emit_arith because test // doesn't support sign-extension of // 8bit operands - int encode = dst->encoding(); + int encode = dst.encoding(); if (encode == 0) { prefix(REX_W); emit_int8((unsigned char)0xA9); @@ -5691,7 +5683,7 @@ } void Assembler::testq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); + (void) prefixq_and_encode(dst.encoding(), src.encoding()); emit_arith(0x85, 0xC0, dst, src); } @@ -5711,13 +5703,13 @@ } void Assembler::xchgq(Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + int encode = prefixq_and_encode(dst.encoding(), src.encoding()); emit_int8((unsigned char)0x87); emit_int8((unsigned char)(0xc0 | encode)); } void 
Assembler::xorq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); + (void) prefixq_and_encode(dst.encoding(), src.encoding()); emit_arith(0x33, 0xC0, dst, src); } --- old/src/cpu/x86/vm/assembler_x86.hpp 2014-04-24 15:52:56.000000000 -1000 +++ new/src/cpu/x86/vm/assembler_x86.hpp 2014-04-24 15:52:56.000000000 -1000 @@ -31,119 +31,6 @@ // Contains all the definitions needed for x86 assembly code generation. -// Calling convention -class Argument VALUE_OBJ_CLASS_SPEC { - public: - enum { -#ifdef _LP64 -#ifdef _WIN64 - n_int_register_parameters_c = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) - n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... ) -#else - n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) - n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... ) -#endif // _WIN64 - n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ... - n_float_register_parameters_j = 8 // j_farg0, j_farg1, ... -#else - n_register_parameters = 0 // 0 registers used to pass arguments -#endif // _LP64 - }; -}; - - -#ifdef _LP64 -// Symbolically name the register arguments used by the c calling convention. -// Windows is different from linux/solaris. So much for standards... - -#ifdef _WIN64 - -REGISTER_DECLARATION(Register, c_rarg0, rcx); -REGISTER_DECLARATION(Register, c_rarg1, rdx); -REGISTER_DECLARATION(Register, c_rarg2, r8); -REGISTER_DECLARATION(Register, c_rarg3, r9); - -REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); -REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); -REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); -REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); - -#else - -REGISTER_DECLARATION(Register, c_rarg0, rdi); -REGISTER_DECLARATION(Register, c_rarg1, rsi); -REGISTER_DECLARATION(Register, c_rarg2, rdx); -REGISTER_DECLARATION(Register, c_rarg3, rcx); -REGISTER_DECLARATION(Register, c_rarg4, r8); -REGISTER_DECLARATION(Register, c_rarg5, r9); - -REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); -REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); -REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); -REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); -REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4); -REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5); -REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6); -REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7); - -#endif // _WIN64 - -// Symbolically name the register arguments used by the Java calling convention. -// We have control over the convention for java so we can do what we please. -// What pleases us is to offset the java calling convention so that when -// we call a suitable jni method the arguments are lined up and we don't -// have to do little shuffling. 
A suitable jni method is non-static and a -// small number of arguments (two fewer args on windows) -// -// |-------------------------------------------------------| -// | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 | -// |-------------------------------------------------------| -// | rcx rdx r8 r9 rdi* rsi* | windows (* not a c_rarg) -// | rdi rsi rdx rcx r8 r9 | solaris/linux -// |-------------------------------------------------------| -// | j_rarg5 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 | -// |-------------------------------------------------------| - -REGISTER_DECLARATION(Register, j_rarg0, c_rarg1); -REGISTER_DECLARATION(Register, j_rarg1, c_rarg2); -REGISTER_DECLARATION(Register, j_rarg2, c_rarg3); -// Windows runs out of register args here -#ifdef _WIN64 -REGISTER_DECLARATION(Register, j_rarg3, rdi); -REGISTER_DECLARATION(Register, j_rarg4, rsi); -#else -REGISTER_DECLARATION(Register, j_rarg3, c_rarg4); -REGISTER_DECLARATION(Register, j_rarg4, c_rarg5); -#endif /* _WIN64 */ -REGISTER_DECLARATION(Register, j_rarg5, c_rarg0); - -REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0); -REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1); -REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2); -REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3); -REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4); -REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5); -REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6); -REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7); - -REGISTER_DECLARATION(Register, rscratch1, r10); // volatile -REGISTER_DECLARATION(Register, rscratch2, r11); // volatile - -REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved -REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved - -#else -// rscratch1 will apear in 32bit code that is dead but of course must compile -// Using noreg ensures if the dead code is incorrectly live and executed it -// will cause an assertion failure -#define rscratch1 noreg -#define rscratch2 noreg - -#endif // _LP64 - -// JSR 292 fixed register usages: -REGISTER_DECLARATION(Register, rbp_mh_SP_save, rbp); - // Address is an abstraction used to represent a memory location // using any of the amd64 addressing modes with one object. 
// @@ -217,7 +104,7 @@ _index(index), _scale(scale), _disp (disp) { - assert(!index->is_valid() == (scale == Address::no_scale), + assert(!index.is_valid() == (scale == Address::no_scale), "inconsistent address"); } @@ -227,7 +114,7 @@ _scale(scale), _disp (disp + (index.constant_or_zero() * scale_size(scale))) { if (!index.is_register()) scale = Address::no_scale; - assert(!_index->is_valid() == (scale == Address::no_scale), + assert(!_index.is_valid() == (scale == Address::no_scale), "inconsistent address"); } @@ -240,7 +127,7 @@ Address a = (*this); a._disp += disp.constant_or_zero() * scale_size(scale); if (disp.is_register()) { - assert(!a.index()->is_valid(), "competing indexes"); + assert(!a.index().is_valid(), "competing indexes"); a._index = disp.as_register(); a._scale = scale; } @@ -275,7 +162,7 @@ _index(index), _scale(scale), _disp(in_bytes(disp)) { - assert(!index->is_valid() == (scale == Address::no_scale), + assert(!index.is_valid() == (scale == Address::no_scale), "inconsistent address"); } @@ -285,7 +172,7 @@ _scale(scale), _disp (in_bytes(disp) + (index.constant_or_zero() * scale_size(scale))) { if (!index.is_register()) scale = Address::no_scale; - assert(!_index->is_valid() == (scale == Address::no_scale), + assert(!_index.is_valid() == (scale == Address::no_scale), "inconsistent address"); } @@ -307,11 +194,11 @@ private: bool base_needs_rex() const { - return _base != noreg && _base->encoding() >= 8; + return _base != noreg && _base.encoding() >= 8; } bool index_needs_rex() const { - return _index != noreg &&_index->encoding() >= 8; + return _index != noreg && _index.encoding() >= 8; } relocInfo::relocType reloc() const { return _rspec.type(); } @@ -585,22 +472,22 @@ void vex_prefix(XMMRegister dst, XMMRegister nds, Address src, VexSimdPrefix pre, bool vector256 = false) { - int dst_enc = dst->encoding(); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; + int dst_enc = dst.encoding(); + int nds_enc = nds.is_valid() ? 
nds.encoding() : 0; vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector256); } void vex_prefix_0F38(Register dst, Register nds, Address src) { bool vex_w = false; bool vector256 = false; - vex_prefix(src, nds->encoding(), dst->encoding(), + vex_prefix(src, nds.encoding(), dst.encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256); } void vex_prefix_0F38_q(Register dst, Register nds, Address src) { bool vex_w = true; bool vector256 = false; - vex_prefix(src, nds->encoding(), dst->encoding(), + vex_prefix(src, nds.encoding(), dst.encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256); } int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, @@ -610,21 +497,21 @@ int vex_prefix_0F38_and_encode(Register dst, Register nds, Register src) { bool vex_w = false; bool vector256 = false; - return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), + return vex_prefix_and_encode(dst.encoding(), nds.encoding(), src.encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256); } int vex_prefix_0F38_and_encode_q(Register dst, Register nds, Register src) { bool vex_w = true; bool vector256 = false; - return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), + return vex_prefix_and_encode(dst.encoding(), nds.encoding(), src.encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256); } int vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, bool vector256 = false, VexOpcode opc = VEX_OPCODE_0F) { - int src_enc = src->encoding(); - int dst_enc = dst->encoding(); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; + int src_enc = src.encoding(); + int dst_enc = dst.encoding(); + int nds_enc = nds.is_valid() ? nds.encoding() : 0; return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, false, vector256); } @@ -656,21 +543,21 @@ // It is OK to cast from Register to XMMRegister to pass argument here // since only encoding is used in simd_prefix_and_encode() and number of // Gen and Xmm registers are the same. - return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre); + return simd_prefix_and_encode(dst, nds, as_XMMRegister(src.encoding()), pre); } int simd_prefix_and_encode(XMMRegister dst, Register src, VexSimdPrefix pre) { return simd_prefix_and_encode(dst, xnoreg, src, pre); } int simd_prefix_and_encode(Register dst, XMMRegister src, VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) { - return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc); + return simd_prefix_and_encode(as_XMMRegister(dst.encoding()), xnoreg, src, pre, opc); } // Move/convert 64-bit integer value. 
int simd_prefix_and_encode_q(XMMRegister dst, XMMRegister nds, Register src, VexSimdPrefix pre) { bool rex_w = true; - return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, VEX_OPCODE_0F, rex_w); + return simd_prefix_and_encode(dst, nds, as_XMMRegister(src.encoding()), pre, VEX_OPCODE_0F, rex_w); } int simd_prefix_and_encode_q(XMMRegister dst, Register src, VexSimdPrefix pre) { return simd_prefix_and_encode_q(dst, xnoreg, src, pre); @@ -678,7 +565,7 @@ int simd_prefix_and_encode_q(Register dst, XMMRegister src, VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) { bool rex_w = true; - return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc, rex_w); + return simd_prefix_and_encode(as_XMMRegister(dst.encoding()), xnoreg, src, pre, opc, rex_w); } // Helper functions for groups of instructions @@ -698,7 +585,7 @@ void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, bool vector256); - void emit_operand(Register reg, + void emit_operand(AbstractRegister reg, Register base, Register index, Address::ScaleFactor scale, int disp, RelocationHolder const& rspec, @@ -709,11 +596,6 @@ // operands that only take the original 32bit registers void emit_operand32(Register reg, Address adr); - void emit_operand(XMMRegister reg, - Register base, Register index, Address::ScaleFactor scale, - int disp, - RelocationHolder const& rspec); - void emit_operand(XMMRegister reg, Address adr); void emit_operand(MMXRegister reg, Address adr); --- old/src/cpu/x86/vm/c1_Defs_x86.hpp 2014-04-24 15:52:57.000000000 -1000 +++ new/src/cpu/x86/vm/c1_Defs_x86.hpp 2014-04-24 15:52:57.000000000 -1000 @@ -39,9 +39,9 @@ // registers enum { - pd_nof_cpu_regs_frame_map = RegisterImpl::number_of_registers, // number of registers used during code emission - pd_nof_fpu_regs_frame_map = FloatRegisterImpl::number_of_registers, // number of registers used during code emission - pd_nof_xmm_regs_frame_map = XMMRegisterImpl::number_of_registers, // number of registers used during code emission + pd_nof_cpu_regs_frame_map = Register::number_of_registers, // number of registers used during code emission + pd_nof_fpu_regs_frame_map = FloatRegister::number_of_registers, // number of registers used during code emission + pd_nof_xmm_regs_frame_map = XMMRegister::number_of_registers, // number of registers used during code emission #ifdef _LP64 #define UNALLOCATED 4 // rsp, rbp, r15, r10 --- old/src/cpu/x86/vm/c1_FrameMap_x86.cpp 2014-04-24 15:52:57.000000000 -1000 +++ new/src/cpu/x86/vm/c1_FrameMap_x86.cpp 2014-04-24 15:52:57.000000000 -1000 @@ -59,7 +59,7 @@ } } else if (r_1->is_FloatRegister()) { assert(type == T_DOUBLE || type == T_FLOAT, "wrong type"); - int num = r_1->as_FloatRegister()->encoding(); + int num = r_1->as_FloatRegister().encoding(); if (type == T_FLOAT) { opr = LIR_OprFact::single_fpu(num); } else { @@ -67,7 +67,7 @@ } } else if (r_1->is_XMMRegister()) { assert(type == T_DOUBLE || type == T_FLOAT, "wrong type"); - int num = r_1->as_XMMRegister()->encoding(); + int num = r_1->as_XMMRegister().encoding(); if (type == T_FLOAT) { opr = LIR_OprFact::single_xmm(num); } else { @@ -144,7 +144,7 @@ LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, }; LIR_Opr FrameMap::_caller_save_xmm_regs[] = { 0, }; -XMMRegister FrameMap::_xmm_regs [] = { 0, }; +XMMRegister FrameMap::_xmm_regs[]; XMMRegister FrameMap::nr2xmmreg(int rnr) { assert(_init_done, "tables not initialized"); @@ -333,10 +333,10 @@ // arguments non-argument locals -VMReg 
FrameMap::fpu_regname (int n) { +VMReg FrameMap::fpu_regname(int n) { // Return the OptoReg name for the fpu stack slot "n" // A spilled fpu stack slot comprises to two single-word OptoReg's. - return as_FloatRegister(n)->as_VMReg(); + return as_FloatRegister(n).as_VMReg(); } LIR_Opr FrameMap::stack_pointer() { --- old/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp 2014-04-24 15:52:58.000000000 -1000 +++ new/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp 2014-04-24 15:52:58.000000000 -1000 @@ -1012,7 +1012,7 @@ __ movptr(compressed_src, src->as_register()); __ encode_heap_oop(compressed_src); if (patch_code != lir_patch_none) { - info->oop_map()->set_narrowoop(compressed_src->as_VMReg()); + info->oop_map()->set_narrowoop(compressed_src.as_VMReg()); } } #endif @@ -1112,7 +1112,7 @@ case T_BOOLEAN: { Register src_reg = src->as_register(); Address dst_addr = as_Address(to_addr); - assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6"); + assert(VM_Version::is_P6() || src_reg.has_byte_register(), "must use byte registers if not P6"); __ movb(dst_addr, src_reg); break; } @@ -1338,7 +1338,7 @@ case T_BOOLEAN: // fall through case T_BYTE: { Register dest_reg = dest->as_register(); - assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); + assert(VM_Version::is_P6() || dest_reg.has_byte_register(), "must use byte registers if not P6"); if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { __ movsbl(dest_reg, from_addr); } else { @@ -1351,7 +1351,7 @@ case T_CHAR: { Register dest_reg = dest->as_register(); - assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); + assert(VM_Version::is_P6() || dest_reg.has_byte_register(), "must use byte registers if not P6"); if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { __ movzwl(dest_reg, from_addr); } else { @@ -1988,7 +1988,7 @@ Register newval = op->new_value()->as_register(); Register cmpval = op->cmp_value()->as_register(); assert(cmpval == rax, "wrong register"); - assert(newval != NULL, "new val must be register"); + assert(newval != noreg, "new val must be register"); assert(cmpval != newval, "cmp and new values must be in different registers"); assert(cmpval != addr, "cmp and addr must be in different registers"); assert(newval != addr, "new value and addr must be in different registers"); @@ -2025,7 +2025,7 @@ Register newval = op->new_value()->as_register_lo(); Register cmpval = op->cmp_value()->as_register_lo(); assert(cmpval == rax, "wrong register"); - assert(newval != NULL, "new val must be register"); + assert(newval != noreg, "new val must be register"); assert(cmpval != newval, "cmp and new values must be in different registers"); assert(cmpval != addr, "cmp and addr must be in different registers"); assert(newval != addr, "new value and addr must be in different registers"); --- old/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp 2014-04-24 15:52:59.000000000 -1000 +++ new/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp 2014-04-24 15:52:58.000000000 -1000 @@ -148,7 +148,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) { assert_different_registers(obj, klass, len); - if (UseBiasedLocking && !len->is_valid()) { + if (UseBiasedLocking && !len.is_valid()) { assert_different_registers(obj, klass, len, t1, t2); movptr(t1, Address(klass, Klass::prototype_header_offset())); movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1); @@ -167,7 +167,7 @@ movptr(Address(obj, 
oopDesc::klass_offset_in_bytes()), klass); } - if (len->is_valid()) { + if (len.is_valid()) { movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len); } #ifdef _LP64 --- old/src/cpu/x86/vm/c1_Runtime1_x86.cpp 2014-04-24 15:52:59.000000000 -1000 +++ new/src/cpu/x86/vm/c1_Runtime1_x86.cpp 2014-04-24 15:52:59.000000000 -1000 @@ -48,7 +48,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) { // setup registers const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions) - assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different"); + assert(!(oop_result1.is_valid() || metadata_result.is_valid()) || oop_result1 != metadata_result, "registers must be different"); assert(oop_result1 != thread && metadata_result != thread, "registers must be different"); assert(args_size >= 0, "illegal args_size"); bool align_stack = false; @@ -110,10 +110,10 @@ // exception pending => remove activation and forward to exception handler movptr(rax, Address(thread, Thread::pending_exception_offset())); // make sure that the vm_results are cleared - if (oop_result1->is_valid()) { + if (oop_result1.is_valid()) { movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); } - if (metadata_result->is_valid()) { + if (metadata_result.is_valid()) { movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); } if (frame_size() == no_frame_size) { @@ -127,10 +127,10 @@ bind(L); } // get oop results if there are any and reset the values in the thread - if (oop_result1->is_valid()) { + if (oop_result1.is_valid()) { get_vm_result(oop_result1, thread); } - if (metadata_result->is_valid()) { + if (metadata_result.is_valid()) { get_vm_result_2(metadata_result, thread); } return call_offset; @@ -328,38 +328,38 @@ // record saved value locations in an OopMap // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread OopMap* map = new OopMap(frame_size_in_slots, 0); - map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg()); - map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg()); - map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg()); - map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg()); - map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg()); - map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg()); -#ifdef _LP64 - map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8->as_VMReg()); - map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9->as_VMReg()); - map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg()); - map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg()); - map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg()); - map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg()); - map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg()); - map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax.as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx.as_VMReg()); + 
map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx.as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx.as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi.as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi.as_VMReg()); +#ifdef _LP64 + map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8.as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9.as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10.as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11.as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12.as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13.as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14.as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15.as_VMReg()); // This is stupid but needed. - map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next()); - map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next()); - map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next()); - map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next()); - map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next()); - map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next()); - - map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args), r8->as_VMReg()->next()); - map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args), r9->as_VMReg()->next()); - map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next()); - map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next()); - map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next()); - map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next()); - map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next()); - map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax.as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx.as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx.as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx.as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi.as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi.as_VMReg()->next()); + + map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args), r8.as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args), r9.as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10.as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11.as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12.as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13.as_VMReg()->next()); + 
map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14.as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15.as_VMReg()->next()); #endif // _LP64 if (save_fpu_registers) { @@ -380,7 +380,7 @@ if (UseSSE >= 2) { int xmm_off = xmm_regs_as_doubles_off; for (int n = 0; n < FrameMap::nof_xmm_regs; n++) { - VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); + VMReg xmm_name_0 = as_XMMRegister(n).as_VMReg(); map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); // %%% This is really a waste but we'll keep things as they were for now if (true) { @@ -393,7 +393,7 @@ } else if (UseSSE == 1) { int xmm_off = xmm_regs_as_doubles_off; for (int n = 0; n < FrameMap::nof_xmm_regs; n++) { - VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); + VMReg xmm_name_0 = as_XMMRegister(n).as_VMReg(); map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); xmm_off += 2; } --- old/src/cpu/x86/vm/frame_x86.cpp 2014-04-24 15:53:00.000000000 -1000 +++ new/src/cpu/x86/vm/frame_x86.cpp 2014-04-24 15:53:00.000000000 -1000 @@ -436,14 +436,14 @@ // Since the interpreter always saves EBP/RBP if we record where it is then // we don't have to always save EBP/RBP on entry and exit to c2 compiled // code, on entry will be enough. - map->set_location(rbp->as_VMReg(), (address) link_addr); + map->set_location(rbp.as_VMReg(), (address) link_addr); #ifdef AMD64 // this is weird "H" ought to be at a higher address however the // oopMaps seems to have the "H" regs at the same address and the // vanilla register. // XXXX make this go away if (true) { - map->set_location(rbp->as_VMReg()->next(), (address) link_addr); + map->set_location(rbp.as_VMReg()->next(), (address) link_addr); } #endif // AMD64 } --- old/src/cpu/x86/vm/frame_x86.inline.hpp 2014-04-24 15:53:00.000000000 -1000 +++ new/src/cpu/x86/vm/frame_x86.inline.hpp 2014-04-24 15:53:00.000000000 -1000 @@ -299,14 +299,14 @@ } inline oop frame::saved_oop_result(RegisterMap* map) const { - oop* result_adr = (oop *)map->location(rax->as_VMReg()); + oop* result_adr = (oop *)map->location(rax.as_VMReg()); guarantee(result_adr != NULL, "bad register save location"); return (*result_adr); } inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) { - oop* result_adr = (oop *)map->location(rax->as_VMReg()); + oop* result_adr = (oop *)map->location(rax.as_VMReg()); guarantee(result_adr != NULL, "bad register save location"); *result_adr = obj; --- old/src/cpu/x86/vm/macroAssembler_x86.cpp 2014-04-24 15:53:01.000000000 -1000 +++ new/src/cpu/x86/vm/macroAssembler_x86.cpp 2014-04-24 15:53:01.000000000 -1000 @@ -766,12 +766,12 @@ Register last_java_fp, address last_java_pc) { // determine last_java_sp register - if (!last_java_sp->is_valid()) { + if (!last_java_sp.is_valid()) { last_java_sp = rsp; } // last_java_fp is optional - if (last_java_fp->is_valid()) { + if (last_java_fp.is_valid()) { movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), last_java_fp); } @@ -2500,7 +2500,7 @@ int number_of_arguments, bool check_exceptions) { // determine java_thread register - if (!java_thread->is_valid()) { + if (!java_thread.is_valid()) { #ifdef _LP64 java_thread = r15_thread; #else @@ -2509,7 +2509,7 @@ #endif // LP64 } // determine last_java_sp register - if (!last_java_sp->is_valid()) { + if (!last_java_sp.is_valid()) { last_java_sp = rsp; } // debugging support @@ -2587,7 +2587,7 @@ } // get oop result if there is one and reset the value in the thread - if 
(oop_result->is_valid()) { + if (oop_result.is_valid()) { get_vm_result(oop_result, java_thread); } } @@ -3790,7 +3790,7 @@ void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) { // determine java_thread register - if (!java_thread->is_valid()) { + if (!java_thread.is_valid()) { java_thread = rdi; get_thread(java_thread); } @@ -3846,18 +3846,18 @@ Register last_java_fp, address last_java_pc) { // determine java_thread register - if (!java_thread->is_valid()) { + if (!java_thread.is_valid()) { java_thread = rdi; get_thread(java_thread); } // determine last_java_sp register - if (!last_java_sp->is_valid()) { + if (!last_java_sp.is_valid()) { last_java_sp = rsp; } // last_java_fp is optional - if (last_java_fp->is_valid()) { + if (last_java_fp.is_valid()) { movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp); } @@ -3881,7 +3881,7 @@ } void MacroAssembler::sign_extend_byte(Register reg) { - if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) { + if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg.has_byte_register())) { movsbl(reg, reg); // movsxb } else { shll(reg, 24); @@ -4534,24 +4534,24 @@ Register var_size_in_bytes, int con_size_in_bytes, Register t1) { - if (!thread->is_valid()) { + if (!thread.is_valid()) { #ifdef _LP64 thread = r15_thread; #else - assert(t1->is_valid(), "need temp reg"); + assert(t1.is_valid(), "need temp reg"); thread = t1; get_thread(thread); #endif } #ifdef _LP64 - if (var_size_in_bytes->is_valid()) { + if (var_size_in_bytes.is_valid()) { addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes); } else { addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes); } #else - if (var_size_in_bytes->is_valid()) { + if (var_size_in_bytes.is_valid()) { addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes); } else { addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes); @@ -5150,7 +5150,7 @@ { ResourceMark rm; stringStream ss; - ss.print("verify_oop: %s: %s", reg->name(), s); + ss.print("verify_oop: %s: %s", reg.name(), s); b = code_string(ss.as_string()); } BLOCK_COMMENT("verify_oop {"); --- old/src/cpu/x86/vm/methodHandles_x86.cpp 2014-04-24 15:53:02.000000000 -1000 +++ new/src/cpu/x86/vm/methodHandles_x86.cpp 2014-04-24 15:53:01.000000000 -1000 @@ -487,11 +487,11 @@ if (Verbose) { tty->print_cr("Registers:"); - const int saved_regs_count = RegisterImpl::number_of_registers; + const int saved_regs_count = Register::number_of_registers; for (int i = 0; i < saved_regs_count; i++) { Register r = as_Register(i); // The registers are stored in reverse order on the stack (by pusha). 
- tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]); + tty->print("%3s=" PTR_FORMAT, r.name(), saved_regs[((saved_regs_count - 1) - i)]); if ((i + 1) % 4 == 0) { tty->cr(); } else { --- old/src/cpu/x86/vm/nativeInst_x86.cpp 2014-04-24 15:53:02.000000000 -1000 +++ new/src/cpu/x86/vm/nativeInst_x86.cpp 2014-04-24 15:53:02.000000000 -1000 @@ -515,9 +515,9 @@ } void NativePopReg::insert(address code_pos, Register reg) { - assert(reg->encoding() < 8, "no space for REX"); + assert(reg.encoding() < 8, "no space for REX"); assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update"); - *code_pos = (u_char)(instruction_code | reg->encoding()); + *code_pos = (u_char)(instruction_code | reg.encoding()); ICache::invalidate_range(code_pos, instruction_size); } --- old/src/cpu/x86/vm/register_x86.cpp 2014-04-24 15:53:03.000000000 -1000 +++ new/src/cpu/x86/vm/register_x86.cpp 2014-04-24 15:53:03.000000000 -1000 @@ -26,17 +26,17 @@ #include "register_x86.hpp" #ifndef AMD64 -const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers; +const int ConcreteRegisterImpl::max_gpr = Register::number_of_registers; #else -const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers << 1; +const int ConcreteRegisterImpl::max_gpr = Register::number_of_registers << 1; #endif // AMD64 const int ConcreteRegisterImpl::max_fpr = ConcreteRegisterImpl::max_gpr + - 2 * FloatRegisterImpl::number_of_registers; + 2 * FloatRegister::number_of_registers; const int ConcreteRegisterImpl::max_xmm = ConcreteRegisterImpl::max_fpr + - 8 * XMMRegisterImpl::number_of_registers; -const char* RegisterImpl::name() const { + 8 * XMMRegister::number_of_registers; +const char* Register::name() const { const char* names[number_of_registers] = { #ifndef AMD64 "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi" @@ -48,18 +48,25 @@ return is_valid() ? names[encoding()] : "noreg"; } -const char* FloatRegisterImpl::name() const { +const char* FloatRegister::name() const { const char* names[number_of_registers] = { "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7" }; return is_valid() ? names[encoding()] : "noreg"; } -const char* XMMRegisterImpl::name() const { +const char* MMXRegister::name() const { const char* names[number_of_registers] = { - "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7" + "mmx0", "mmx1", "mmx2", "mmx3", "mmx4", "mmx5", "mmx6", "mmx7" + }; + return is_valid() ? names[encoding()] : "xnoreg"; +} + +const char* XMMRegister::name() const { + const char* names[number_of_registers] = { + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" #ifdef AMD64 - ,"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" + ,"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" #endif // AMD64 }; return is_valid() ? names[encoding()] : "xnoreg"; --- old/src/cpu/x86/vm/register_x86.hpp 2014-04-24 15:53:03.000000000 -1000 +++ new/src/cpu/x86/vm/register_x86.hpp 2014-04-24 15:53:03.000000000 -1000 @@ -31,17 +31,10 @@ class VMRegImpl; typedef VMRegImpl* VMReg; -// Use Register as shortcut -class RegisterImpl; -typedef RegisterImpl* Register; - - -// The implementation of integer registers for the ia32 architecture -inline Register as_Register(int encoding) { - return (Register)(intptr_t) encoding; -} - -class RegisterImpl: public AbstractRegisterImpl { +/** + * The implementation of integer registers for the IA32 architecture. 
+ */ +class Register: public AbstractRegister { public: enum { #ifndef AMD64 @@ -53,93 +46,111 @@ #endif // AMD64 }; + /** + * Constructor to construct an invalid register. + */ + Register() : AbstractRegister(-1) {} + + Register(int encoding) : AbstractRegister(encoding) {} + // derived registers, offsets, and addresses - Register successor() const { return as_Register(encoding() + 1); } + Register successor() const { + return Register(encoding() + 1); + } // construction inline friend Register as_Register(int encoding); - VMReg as_VMReg(); + VMReg as_VMReg() const; + + bool is_valid() const { + return 0 <= encoding() && encoding() < number_of_registers; + } + + bool has_byte_register() const { + return 0 <= encoding() && encoding() < number_of_byte_registers; + } - // accessors - int encoding() const { assert(is_valid(), "invalid register"); return (intptr_t)this; } - bool is_valid() const { return 0 <= (intptr_t)this && (intptr_t)this < number_of_registers; } - bool has_byte_register() const { return 0 <= (intptr_t)this && (intptr_t)this < number_of_byte_registers; } const char* name() const; }; -// The integer registers of the ia32/amd64 architecture +inline Register as_Register(int encoding) { + return Register(encoding); +} -CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1)); +/** + * The implementation of floating point registers for the IA32 architecture. + */ +class FloatRegister: public AbstractRegister { + public: + enum { + number_of_registers = 8 + }; + /** + * Constructor to construct an invalid register. + */ + FloatRegister() : AbstractRegister(-1) {} -CONSTANT_REGISTER_DECLARATION(Register, rax, (0)); -CONSTANT_REGISTER_DECLARATION(Register, rcx, (1)); -CONSTANT_REGISTER_DECLARATION(Register, rdx, (2)); -CONSTANT_REGISTER_DECLARATION(Register, rbx, (3)); -CONSTANT_REGISTER_DECLARATION(Register, rsp, (4)); -CONSTANT_REGISTER_DECLARATION(Register, rbp, (5)); -CONSTANT_REGISTER_DECLARATION(Register, rsi, (6)); -CONSTANT_REGISTER_DECLARATION(Register, rdi, (7)); -#ifdef AMD64 -CONSTANT_REGISTER_DECLARATION(Register, r8, (8)); -CONSTANT_REGISTER_DECLARATION(Register, r9, (9)); -CONSTANT_REGISTER_DECLARATION(Register, r10, (10)); -CONSTANT_REGISTER_DECLARATION(Register, r11, (11)); -CONSTANT_REGISTER_DECLARATION(Register, r12, (12)); -CONSTANT_REGISTER_DECLARATION(Register, r13, (13)); -CONSTANT_REGISTER_DECLARATION(Register, r14, (14)); -CONSTANT_REGISTER_DECLARATION(Register, r15, (15)); -#endif // AMD64 + FloatRegister(int encoding) : AbstractRegister(encoding) {} + + // construction + inline friend FloatRegister as_FloatRegister(int encoding); -// Use FloatRegister as shortcut -class FloatRegisterImpl; -typedef FloatRegisterImpl* FloatRegister; + VMReg as_VMReg() const; + + // derived registers, offsets, and addresses + FloatRegister successor() const { + return FloatRegister(encoding() + 1); + } + + bool is_valid() const { + return 0 <= encoding() && encoding() < number_of_registers; + } + + const char* name() const; +}; inline FloatRegister as_FloatRegister(int encoding) { - return (FloatRegister)(intptr_t) encoding; + return FloatRegister(encoding); } -// The implementation of floating point registers for the ia32 architecture -class FloatRegisterImpl: public AbstractRegisterImpl { +/** + * The implementation of MMX registers for the IA32 architecture. 
+ */ +class MMXRegister: public AbstractRegister { public: enum { number_of_registers = 8 }; + MMXRegister(int encoding) : AbstractRegister(encoding) {} + // construction - inline friend FloatRegister as_FloatRegister(int encoding); + friend MMXRegister as_MMXRegister(int encoding); - VMReg as_VMReg(); + VMReg as_VMReg() const; // derived registers, offsets, and addresses - FloatRegister successor() const { return as_FloatRegister(encoding() + 1); } + MMXRegister successor() const { + return MMXRegister(encoding() + 1); + } + + bool is_valid() const { + return 0 <= encoding() && encoding() < number_of_registers; + } - // accessors - int encoding() const { assert(is_valid(), "invalid register"); return (intptr_t)this; } - bool is_valid() const { return 0 <= (intptr_t)this && (intptr_t)this < number_of_registers; } const char* name() const; - }; -// Use XMMRegister as shortcut -class XMMRegisterImpl; -typedef XMMRegisterImpl* XMMRegister; - -// Use MMXRegister as shortcut -class MMXRegisterImpl; -typedef MMXRegisterImpl* MMXRegister; - -inline XMMRegister as_XMMRegister(int encoding) { - return (XMMRegister)(intptr_t)encoding; -} - inline MMXRegister as_MMXRegister(int encoding) { - return (MMXRegister)(intptr_t)encoding; + return MMXRegister(encoding); } -// The implementation of XMM registers for the IA32 architecture -class XMMRegisterImpl: public AbstractRegisterImpl { +/** + * The implementation of XMM registers for the IA32 architecture. + */ +class XMMRegister: public AbstractRegister { public: enum { #ifndef AMD64 @@ -149,61 +160,38 @@ #endif // AMD64 }; + /** + * Constructor to construct an invalid register. + */ + XMMRegister() : AbstractRegister(-1) {} + + XMMRegister(int encoding) : AbstractRegister(encoding) {} + // construction friend XMMRegister as_XMMRegister(int encoding); - VMReg as_VMReg(); + VMReg as_VMReg() const; // derived registers, offsets, and addresses - XMMRegister successor() const { return as_XMMRegister(encoding() + 1); } + XMMRegister successor() const { + return XMMRegister(encoding() + 1); + } + + bool is_valid() const { + return 0 <= encoding() && encoding() < number_of_registers; + } - // accessors - int encoding() const { assert(is_valid(), err_msg("invalid register (%d)", (int)(intptr_t)this )); return (intptr_t)this; } - bool is_valid() const { return 0 <= (intptr_t)this && (intptr_t)this < number_of_registers; } const char* name() const; }; +inline XMMRegister as_XMMRegister(int encoding) { + return XMMRegister(encoding); +} -// The XMM registers, for P3 and up chips -CONSTANT_REGISTER_DECLARATION(XMMRegister, xnoreg , (-1)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm0 , ( 0)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm1 , ( 1)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm2 , ( 2)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm3 , ( 3)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm4 , ( 4)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm5 , ( 5)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm6 , ( 6)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm7 , ( 7)); -#ifdef AMD64 -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm8, (8)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm9, (9)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm10, (10)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm11, (11)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm12, (12)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm13, (13)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm14, (14)); -CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm15, (15)); 
-#endif // AMD64 - -// Only used by the 32bit stubGenerator. These can't be described by vmreg and hence -// can't be described in oopMaps and therefore can't be used by the compilers (at least -// were deopt might wan't to see them). - -// The MMX registers, for P3 and up chips -CONSTANT_REGISTER_DECLARATION(MMXRegister, mnoreg , (-1)); -CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx0 , ( 0)); -CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx1 , ( 1)); -CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx2 , ( 2)); -CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx3 , ( 3)); -CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx4 , ( 4)); -CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx5 , ( 5)); -CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx6 , ( 6)); -CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx7 , ( 7)); - - -// Need to know the total number of registers of all sorts for SharedInfo. -// Define a class that exports it. -class ConcreteRegisterImpl : public AbstractRegisterImpl { +/** + * Need to know the total number of registers of all sorts for SharedInfo. Define a class that exports it. + */ +class ConcreteRegisterImpl : public AbstractRegister { public: enum { // A big enough number for C2: all the registers plus flags @@ -211,12 +199,12 @@ // There is no requirement that any ordering here matches any ordering c2 gives // it's optoregs. - number_of_registers = RegisterImpl::number_of_registers + + number_of_registers = Register::number_of_registers + #ifdef AMD64 - RegisterImpl::number_of_registers + // "H" half of a 64bit register + Register::number_of_registers + // "H" half of a 64bit register #endif // AMD64 - 2 * FloatRegisterImpl::number_of_registers + - 8 * XMMRegisterImpl::number_of_registers + + 2 * FloatRegister::number_of_registers + + 8 * XMMRegister::number_of_registers + 1 // eflags }; @@ -226,4 +214,180 @@ }; +// Calling convention +class Argument VALUE_OBJ_CLASS_SPEC { + public: + enum { +#ifdef _LP64 +#ifdef _WIN64 + n_int_register_parameters_c = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) + n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... ) +#else + n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) + n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... ) +#endif // _WIN64 + n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ... + n_float_register_parameters_j = 8 // j_farg0, j_farg1, ... +#else + n_register_parameters = 0 // 0 registers used to pass arguments +#endif // _LP64 + }; +}; + +/* + * CPU registers. + */ +static const Register noreg(-1); + +static const Register rax(0); +static const Register rcx(1); +static const Register rdx(2); +static const Register rbx(3); +static const Register rsp(4); +static const Register rbp(5); +static const Register rsi(6); +static const Register rdi(7); + +#ifdef AMD64 +static const Register r8(8); +static const Register r9(9); +static const Register r10(10); +static const Register r11(11); +static const Register r12(12); +static const Register r13(13); +static const Register r14(14); +static const Register r15(15); +#endif + +/* + * XMM registers. 
+ */ +static const XMMRegister xnoreg(-1); + +static const XMMRegister xmm0(0); +static const XMMRegister xmm1(1); +static const XMMRegister xmm2(2); +static const XMMRegister xmm3(3); +static const XMMRegister xmm4(4); +static const XMMRegister xmm5(5); +static const XMMRegister xmm6(6); +static const XMMRegister xmm7(7); + +#ifdef AMD64 +static const XMMRegister xmm8(8); +static const XMMRegister xmm9(9); +static const XMMRegister xmm10(10); +static const XMMRegister xmm11(11); +static const XMMRegister xmm12(12); +static const XMMRegister xmm13(13); +static const XMMRegister xmm14(14); +static const XMMRegister xmm15(15); +#endif + +/* + * MMX registers. + * + * Only used by the 32bit stubGenerator. These can't be described by vmreg and hence + * can't be described in oopMaps and therefore can't be used by the compilers (at least + * where deopt might want to see them). + */ +static const MMXRegister mnoreg(-1); + +static const MMXRegister mmx0(0); +static const MMXRegister mmx1(1); +static const MMXRegister mmx2(2); +static const MMXRegister mmx3(3); +static const MMXRegister mmx4(4); +static const MMXRegister mmx5(5); +static const MMXRegister mmx6(6); +static const MMXRegister mmx7(7); + +#ifdef _LP64 +// Symbolically name the register arguments used by the c calling convention. +// Windows is different from linux/solaris. So much for standards... + +#ifdef _WIN64 + +static const Register c_rarg0 = rcx; +static const Register c_rarg1 = rdx; +static const Register c_rarg2 = r8; +static const Register c_rarg3 = r9; + +static const XMMRegister c_farg0 = xmm0; +static const XMMRegister c_farg1 = xmm1; +static const XMMRegister c_farg2 = xmm2; +static const XMMRegister c_farg3 = xmm3; + +#else + +static const Register c_rarg0 = rdi; +static const Register c_rarg1 = rsi; +static const Register c_rarg2 = rdx; +static const Register c_rarg3 = rcx; +static const Register c_rarg4 = r8; +static const Register c_rarg5 = r9; + +static const XMMRegister c_farg0 = xmm0; +static const XMMRegister c_farg1 = xmm1; +static const XMMRegister c_farg2 = xmm2; +static const XMMRegister c_farg3 = xmm3; +static const XMMRegister c_farg4 = xmm4; +static const XMMRegister c_farg5 = xmm5; +static const XMMRegister c_farg6 = xmm6; +static const XMMRegister c_farg7 = xmm7; + +#endif // _WIN64 + +// Symbolically name the register arguments used by the Java calling convention. +// We have control over the convention for java so we can do what we please. +// What pleases us is to offset the java calling convention so that when +// we call a suitable jni method the arguments are lined up and we don't +// have to do little shuffling.
A suitable jni method is non-static and a +// small number of arguments (two fewer args on windows) +// +// |-------------------------------------------------------| +// | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 | +// |-------------------------------------------------------| +// | rcx rdx r8 r9 rdi* rsi* | windows (* not a c_rarg) +// | rdi rsi rdx rcx r8 r9 | solaris/linux +// |-------------------------------------------------------| +// | j_rarg5 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 | +// |-------------------------------------------------------| + +static const Register j_rarg0 = c_rarg1; +static const Register j_rarg1 = c_rarg2; +static const Register j_rarg2 = c_rarg3; +// Windows runs out of register args here +static const Register j_rarg3 = NOT_WIN64(c_rarg4) WIN64_ONLY(rdi); +static const Register j_rarg4 = NOT_WIN64(c_rarg5) WIN64_ONLY(rsi); +static const Register j_rarg5 = c_rarg0; + +static const XMMRegister j_farg0 = xmm0; +static const XMMRegister j_farg1 = xmm1; +static const XMMRegister j_farg2 = xmm2; +static const XMMRegister j_farg3 = xmm3; +static const XMMRegister j_farg4 = xmm4; +static const XMMRegister j_farg5 = xmm5; +static const XMMRegister j_farg6 = xmm6; +static const XMMRegister j_farg7 = xmm7; + +static const Register rscratch1 = r10; // volatile +static const Register rscratch2 = r11; // volatile + +static const Register r12_heapbase = r12; // callee-saved +static const Register r15_thread = r15; // callee-saved + +#else +// rscratch1 will appear in 32bit code that is dead but of course must compile. +// Using noreg ensures if the dead code is incorrectly live and executed it +// will cause an assertion failure. + +static const Register rscratch1 = noreg; +static const Register rscratch2 = noreg; + +#endif // _LP64 + +// JSR 292 fixed register usages: +static const Register rbp_mh_SP_save = rbp; + #endif // CPU_X86_VM_REGISTER_X86_HPP --- old/src/cpu/x86/vm/sharedRuntime_x86_64.cpp 2014-04-24 15:53:04.000000000 -1000 +++ new/src/cpu/x86/vm/sharedRuntime_x86_64.cpp 2014-04-24 15:53:04.000000000 -1000 @@ -203,72 +203,72 @@ #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_slots) - map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg()); - map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg()); - map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg()); - map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg()); + map->set_callee_saved(STACK_OFFSET( rax_off ), rax.as_VMReg()); + map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx.as_VMReg()); + map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx.as_VMReg()); + map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx.as_VMReg()); // rbp location is known implicitly by the frame sender code, needs no oopmap // and the location where rbp was saved by is ignored - map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi->as_VMReg()); - map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi->as_VMReg()); - map->set_callee_saved(STACK_OFFSET( r8_off ), r8->as_VMReg()); - map->set_callee_saved(STACK_OFFSET( r9_off ), r9->as_VMReg()); - map->set_callee_saved(STACK_OFFSET( r10_off ), r10->as_VMReg()); - map->set_callee_saved(STACK_OFFSET( r11_off ), r11->as_VMReg()); - map->set_callee_saved(STACK_OFFSET( r12_off ), r12->as_VMReg()); - map->set_callee_saved(STACK_OFFSET( r13_off ), r13->as_VMReg()); - map->set_callee_saved(STACK_OFFSET( r14_off ), r14->as_VMReg()); - map->set_callee_saved(STACK_OFFSET( r15_off ), r15->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm0_off ), 
xmm0->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm1_off ), xmm1->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm2_off ), xmm2->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm3_off ), xmm3->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm4_off ), xmm4->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm5_off ), xmm5->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm6_off ), xmm6->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm7_off ), xmm7->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm8_off ), xmm8->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm9_off ), xmm9->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm10_off), xmm10->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm11_off), xmm11->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm12_off), xmm12->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm13_off), xmm13->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm14_off), xmm14->as_VMReg()); - map->set_callee_saved(STACK_OFFSET(xmm15_off), xmm15->as_VMReg()); + map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi.as_VMReg()); + map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi.as_VMReg()); + map->set_callee_saved(STACK_OFFSET( r8_off ), r8.as_VMReg()); + map->set_callee_saved(STACK_OFFSET( r9_off ), r9.as_VMReg()); + map->set_callee_saved(STACK_OFFSET( r10_off ), r10.as_VMReg()); + map->set_callee_saved(STACK_OFFSET( r11_off ), r11.as_VMReg()); + map->set_callee_saved(STACK_OFFSET( r12_off ), r12.as_VMReg()); + map->set_callee_saved(STACK_OFFSET( r13_off ), r13.as_VMReg()); + map->set_callee_saved(STACK_OFFSET( r14_off ), r14.as_VMReg()); + map->set_callee_saved(STACK_OFFSET( r15_off ), r15.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm0_off ), xmm0.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm1_off ), xmm1.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm2_off ), xmm2.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm3_off ), xmm3.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm4_off ), xmm4.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm5_off ), xmm5.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm6_off ), xmm6.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm7_off ), xmm7.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm8_off ), xmm8.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm9_off ), xmm9.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm10_off), xmm10.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm11_off), xmm11.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm12_off), xmm12.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm13_off), xmm13.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm14_off), xmm14.as_VMReg()); + map->set_callee_saved(STACK_OFFSET(xmm15_off), xmm15.as_VMReg()); // %%% These should all be a waste but we'll keep things as they were for now if (true) { - map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET( raxH_off ), rax.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx.as_VMReg()->next()); // rbp location is known implicitly by the frame sender code, needs no oopmap - 
map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET( r8H_off ), r8->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET( r9H_off ), r9->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm0H_off ), xmm0->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm1H_off ), xmm1->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm2H_off ), xmm2->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm3H_off ), xmm3->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm4H_off ), xmm4->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm5H_off ), xmm5->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm6H_off ), xmm6->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm7H_off ), xmm7->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm8H_off ), xmm8->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm9H_off ), xmm9->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm10H_off), xmm10->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm11H_off), xmm11->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm12H_off), xmm12->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm13H_off), xmm13->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm14H_off), xmm14->as_VMReg()->next()); - map->set_callee_saved(STACK_OFFSET(xmm15H_off), xmm15->as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET( r8H_off ), r8.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET( r9H_off ), r9.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET( r10H_off ), r10.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET( r11H_off ), r11.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET( r12H_off ), r12.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET( r13H_off ), r13.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET( r14H_off ), r14.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET( r15H_off ), r15.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm0H_off ), xmm0.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm1H_off ), xmm1.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm2H_off ), xmm2.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm3H_off ), xmm3.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm4H_off ), xmm4.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm5H_off ), xmm5.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm6H_off ), xmm6.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm7H_off ), xmm7.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm8H_off ), xmm8.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm9H_off ), xmm9.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm10H_off), 
xmm10.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm11H_off), xmm11.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm12H_off), xmm12.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm13H_off), xmm13.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm14H_off), xmm14.as_VMReg()->next()); + map->set_callee_saved(STACK_OFFSET(xmm15H_off), xmm15.as_VMReg()->next()); } return map; @@ -398,7 +398,7 @@ case T_SHORT: case T_INT: if (int_args < Argument::n_int_register_parameters_j) { - regs[i].set1(INT_ArgReg[int_args++]->as_VMReg()); + regs[i].set1(INT_ArgReg[int_args++].as_VMReg()); } else { regs[i].set1(VMRegImpl::stack2reg(stk_args)); stk_args += 2; @@ -416,7 +416,7 @@ case T_ARRAY: case T_ADDRESS: if (int_args < Argument::n_int_register_parameters_j) { - regs[i].set2(INT_ArgReg[int_args++]->as_VMReg()); + regs[i].set2(INT_ArgReg[int_args++].as_VMReg()); } else { regs[i].set2(VMRegImpl::stack2reg(stk_args)); stk_args += 2; @@ -424,7 +424,7 @@ break; case T_FLOAT: if (fp_args < Argument::n_float_register_parameters_j) { - regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg()); + regs[i].set1(FP_ArgReg[fp_args++].as_VMReg()); } else { regs[i].set1(VMRegImpl::stack2reg(stk_args)); stk_args += 2; @@ -433,7 +433,7 @@ case T_DOUBLE: assert(sig_bt[i + 1] == T_VOID, "expecting half"); if (fp_args < Argument::n_float_register_parameters_j) { - regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg()); + regs[i].set2(FP_ArgReg[fp_args++].as_VMReg()); } else { regs[i].set2(VMRegImpl::stack2reg(stk_args)); stk_args += 2; @@ -926,7 +926,7 @@ case T_SHORT: case T_INT: if (int_args < Argument::n_int_register_parameters_c) { - regs[i].set1(INT_ArgReg[int_args++]->as_VMReg()); + regs[i].set1(INT_ArgReg[int_args++].as_VMReg()); #ifdef _WIN64 fp_args++; // Allocate slots for callee to stuff register args the stack. @@ -945,7 +945,7 @@ case T_ADDRESS: case T_METADATA: if (int_args < Argument::n_int_register_parameters_c) { - regs[i].set2(INT_ArgReg[int_args++]->as_VMReg()); + regs[i].set2(INT_ArgReg[int_args++].as_VMReg()); #ifdef _WIN64 fp_args++; stk_args += 2; @@ -957,7 +957,7 @@ break; case T_FLOAT: if (fp_args < Argument::n_float_register_parameters_c) { - regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg()); + regs[i].set1(FP_ArgReg[fp_args++].as_VMReg()); #ifdef _WIN64 int_args++; // Allocate slots for callee to stuff register args the stack. @@ -971,7 +971,7 @@ case T_DOUBLE: assert(sig_bt[i + 1] == T_VOID, "expecting half"); if (fp_args < Argument::n_float_register_parameters_c) { - regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg()); + regs[i].set2(FP_ArgReg[fp_args++].as_VMReg()); #ifdef _WIN64 int_args++; // Allocate slots for callee to stuff register args the stack. 
@@ -1436,7 +1436,7 @@ // Pass the length, ptr pair Label is_null, done; VMRegPair tmp; - tmp.set_ptr(tmp_reg->as_VMReg()); + tmp.set_ptr(tmp_reg.as_VMReg()); if (reg.first()->is_stack()) { // Load the arg up from the stack move_ptr(masm, reg, tmp); @@ -2078,12 +2078,12 @@ #ifdef ASSERT - bool reg_destroyed[RegisterImpl::number_of_registers]; - bool freg_destroyed[XMMRegisterImpl::number_of_registers]; - for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { + bool reg_destroyed[Register::number_of_registers]; + bool freg_destroyed[XMMRegister::number_of_registers]; + for ( int r = 0 ; r < Register::number_of_registers ; r++ ) { reg_destroyed[r] = false; } - for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) { + for ( int f = 0 ; f < XMMRegister::number_of_registers ; f++ ) { freg_destroyed[f] = false; } @@ -2095,7 +2095,7 @@ // critical natives they are offset down. GrowableArray arg_order(2 * total_in_args); VMRegPair tmp_vmreg; - tmp_vmreg.set1(rbx->as_VMReg()); + tmp_vmreg.set1(rbx.as_VMReg()); if (!is_critical_native) { for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) { @@ -2128,14 +2128,14 @@ } #ifdef ASSERT if (in_regs[i].first()->is_Register()) { - assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!"); + assert(!reg_destroyed[in_regs[i].first()->as_Register().encoding()], "destroyed reg!"); } else if (in_regs[i].first()->is_XMMRegister()) { - assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!"); + assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister().encoding()], "destroyed reg!"); } if (out_regs[c_arg].first()->is_Register()) { - reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true; + reg_destroyed[out_regs[c_arg].first()->as_Register().encoding()] = true; } else if (out_regs[c_arg].first()->is_XMMRegister()) { - freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true; + freg_destroyed[out_regs[c_arg].first()->as_XMMRegister().encoding()] = true; } #endif /* ASSERT */ switch (in_sig_bt[i]) { @@ -2145,9 +2145,9 @@ c_arg++; #ifdef ASSERT if (out_regs[c_arg].first()->is_Register()) { - reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true; + reg_destroyed[out_regs[c_arg].first()->as_Register().encoding()] = true; } else if (out_regs[c_arg].first()->is_XMMRegister()) { - freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true; + freg_destroyed[out_regs[c_arg].first()->as_XMMRegister().encoding()] = true; } #endif break; @@ -2679,21 +2679,21 @@ assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be"); if (!offsets_initialized) { - fp_offset[c_rarg0->as_VMReg()->value()] = -1 * wordSize; - fp_offset[c_rarg1->as_VMReg()->value()] = -2 * wordSize; - fp_offset[c_rarg2->as_VMReg()->value()] = -3 * wordSize; - fp_offset[c_rarg3->as_VMReg()->value()] = -4 * wordSize; - fp_offset[c_rarg4->as_VMReg()->value()] = -5 * wordSize; - fp_offset[c_rarg5->as_VMReg()->value()] = -6 * wordSize; - - fp_offset[c_farg0->as_VMReg()->value()] = -7 * wordSize; - fp_offset[c_farg1->as_VMReg()->value()] = -8 * wordSize; - fp_offset[c_farg2->as_VMReg()->value()] = -9 * wordSize; - fp_offset[c_farg3->as_VMReg()->value()] = -10 * wordSize; - fp_offset[c_farg4->as_VMReg()->value()] = -11 * wordSize; - fp_offset[c_farg5->as_VMReg()->value()] = -12 * wordSize; - fp_offset[c_farg6->as_VMReg()->value()] = -13 * wordSize; - fp_offset[c_farg7->as_VMReg()->value()] = -14 * wordSize; + 
fp_offset[c_rarg0.as_VMReg()->value()] = -1 * wordSize; + fp_offset[c_rarg1.as_VMReg()->value()] = -2 * wordSize; + fp_offset[c_rarg2.as_VMReg()->value()] = -3 * wordSize; + fp_offset[c_rarg3.as_VMReg()->value()] = -4 * wordSize; + fp_offset[c_rarg4.as_VMReg()->value()] = -5 * wordSize; + fp_offset[c_rarg5.as_VMReg()->value()] = -6 * wordSize; + + fp_offset[c_farg0.as_VMReg()->value()] = -7 * wordSize; + fp_offset[c_farg1.as_VMReg()->value()] = -8 * wordSize; + fp_offset[c_farg2.as_VMReg()->value()] = -9 * wordSize; + fp_offset[c_farg3.as_VMReg()->value()] = -10 * wordSize; + fp_offset[c_farg4.as_VMReg()->value()] = -11 * wordSize; + fp_offset[c_farg5.as_VMReg()->value()] = -12 * wordSize; + fp_offset[c_farg6.as_VMReg()->value()] = -13 * wordSize; + fp_offset[c_farg7.as_VMReg()->value()] = -14 * wordSize; offsets_initialized = true; } @@ -2892,21 +2892,21 @@ bool live[ConcreteRegisterImpl::number_of_registers]; - live[j_rarg0->as_VMReg()->value()] = false; - live[j_rarg1->as_VMReg()->value()] = false; - live[j_rarg2->as_VMReg()->value()] = false; - live[j_rarg3->as_VMReg()->value()] = false; - live[j_rarg4->as_VMReg()->value()] = false; - live[j_rarg5->as_VMReg()->value()] = false; - - live[j_farg0->as_VMReg()->value()] = false; - live[j_farg1->as_VMReg()->value()] = false; - live[j_farg2->as_VMReg()->value()] = false; - live[j_farg3->as_VMReg()->value()] = false; - live[j_farg4->as_VMReg()->value()] = false; - live[j_farg5->as_VMReg()->value()] = false; - live[j_farg6->as_VMReg()->value()] = false; - live[j_farg7->as_VMReg()->value()] = false; + live[j_rarg0.as_VMReg()->value()] = false; + live[j_rarg1.as_VMReg()->value()] = false; + live[j_rarg2.as_VMReg()->value()] = false; + live[j_rarg3.as_VMReg()->value()] = false; + live[j_rarg4.as_VMReg()->value()] = false; + live[j_rarg5.as_VMReg()->value()] = false; + + live[j_farg0.as_VMReg()->value()] = false; + live[j_farg1.as_VMReg()->value()] = false; + live[j_farg2.as_VMReg()->value()] = false; + live[j_farg3.as_VMReg()->value()] = false; + live[j_farg4.as_VMReg()->value()] = false; + live[j_farg5.as_VMReg()->value()] = false; + live[j_farg6.as_VMReg()->value()] = false; + live[j_farg7.as_VMReg()->value()] = false; bool rax_is_zero = false; @@ -2921,7 +2921,7 @@ int src_reg = src.first()->is_reg() ? 
src.first()->value() : - rsp->as_VMReg()->value(); + rsp.as_VMReg()->value(); bool useless = in_sig_bt[j_arg] == T_ARRAY || (in_sig_bt[j_arg] == T_OBJECT && @@ -3138,21 +3138,21 @@ } } // The get_utf call killed all the c_arg registers - live[c_rarg0->as_VMReg()->value()] = false; - live[c_rarg1->as_VMReg()->value()] = false; - live[c_rarg2->as_VMReg()->value()] = false; - live[c_rarg3->as_VMReg()->value()] = false; - live[c_rarg4->as_VMReg()->value()] = false; - live[c_rarg5->as_VMReg()->value()] = false; - - live[c_farg0->as_VMReg()->value()] = false; - live[c_farg1->as_VMReg()->value()] = false; - live[c_farg2->as_VMReg()->value()] = false; - live[c_farg3->as_VMReg()->value()] = false; - live[c_farg4->as_VMReg()->value()] = false; - live[c_farg5->as_VMReg()->value()] = false; - live[c_farg6->as_VMReg()->value()] = false; - live[c_farg7->as_VMReg()->value()] = false; + live[c_rarg0.as_VMReg()->value()] = false; + live[c_rarg1.as_VMReg()->value()] = false; + live[c_rarg2.as_VMReg()->value()] = false; + live[c_rarg3.as_VMReg()->value()] = false; + live[c_rarg4.as_VMReg()->value()] = false; + live[c_rarg5.as_VMReg()->value()] = false; + + live[c_farg0.as_VMReg()->value()] = false; + live[c_farg1.as_VMReg()->value()] = false; + live[c_farg2.as_VMReg()->value()] = false; + live[c_farg3.as_VMReg()->value()] = false; + live[c_farg4.as_VMReg()->value()] = false; + live[c_farg5.as_VMReg()->value()] = false; + live[c_farg6.as_VMReg()->value()] = false; + live[c_farg7.as_VMReg()->value()] = false; } // Now we can finally move the register args to their desired locations --- old/src/cpu/x86/vm/stubGenerator_x86_64.cpp 2014-04-24 15:53:04.000000000 -1000 +++ new/src/cpu/x86/vm/stubGenerator_x86_64.cpp 2014-04-24 15:53:04.000000000 -1000 @@ -942,7 +942,7 @@ __ push(0); // hole for return address-to-be __ pusha(); // push registers - Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord); + Address next_pc(rsp, Register::number_of_registers * BytesPerWord); // FIXME: this probably needs alignment logic @@ -3010,9 +3010,9 @@ // Utility routine for loading a 128-bit key word in little endian format // can optionally specify that the shuffle mask is already in an xmmregister - void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { + void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = xnoreg) { __ movdqu(xmmdst, Address(key, offset)); - if (xmm_shuf_mask != NULL) { + if (xmm_shuf_mask != xnoreg) { __ pshufb(xmmdst, xmm_shuf_mask); } else { __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); --- old/src/cpu/x86/vm/vmreg_x86.cpp 2014-04-24 15:53:05.000000000 -1000 +++ new/src/cpu/x86/vm/vmreg_x86.cpp 2014-04-24 15:53:05.000000000 -1000 @@ -31,29 +31,29 @@ void VMRegImpl::set_regName() { Register reg = ::as_Register(0); int i; - for (i = 0; i < ConcreteRegisterImpl::max_gpr ; ) { - regName[i++] = reg->name(); + for (i = 0; i < ConcreteRegisterImpl::max_gpr; ) { + regName[i++] = reg.name(); #ifdef AMD64 - regName[i++] = reg->name(); + regName[i++] = reg.name(); #endif // AMD64 - reg = reg->successor(); + reg = reg.successor(); } FloatRegister freg = ::as_FloatRegister(0); - for ( ; i < ConcreteRegisterImpl::max_fpr ; ) { - regName[i++] = freg->name(); - regName[i++] = freg->name(); - freg = freg->successor(); + for ( ; i < ConcreteRegisterImpl::max_fpr; ) { + regName[i++] = freg.name(); + regName[i++] = freg.name(); + freg = freg.successor(); } XMMRegister xreg = ::as_XMMRegister(0); - for ( ; 
i < ConcreteRegisterImpl::max_xmm ; ) { + for ( ; i < ConcreteRegisterImpl::max_xmm; ) { for (int j = 0 ; j < 8 ; j++) { - regName[i++] = xreg->name(); + regName[i++] = xreg.name(); } - xreg = xreg->successor(); + xreg = xreg.successor(); } - for ( ; i < ConcreteRegisterImpl::number_of_registers ; i ++ ) { + for ( ; i < ConcreteRegisterImpl::number_of_registers; i++) { regName[i] = "NON-GPR-FPR-XMM"; } } --- old/src/cpu/x86/vm/vmreg_x86.inline.hpp 2014-04-24 15:53:06.000000000 -1000 +++ new/src/cpu/x86/vm/vmreg_x86.inline.hpp 2014-04-24 15:53:06.000000000 -1000 @@ -25,8 +25,10 @@ #ifndef CPU_X86_VM_VMREG_X86_INLINE_HPP #define CPU_X86_VM_VMREG_X86_INLINE_HPP -inline VMReg RegisterImpl::as_VMReg() { - if( this==noreg ) return VMRegImpl::Bad(); +inline VMReg Register::as_VMReg() const { + if (encoding() == noreg.encoding()) { + return VMRegImpl::Bad(); + } #ifdef AMD64 return VMRegImpl::as_VMReg(encoding() << 1 ); #else @@ -34,11 +36,11 @@ #endif // AMD64 } -inline VMReg FloatRegisterImpl::as_VMReg() { +inline VMReg FloatRegister::as_VMReg() const { return VMRegImpl::as_VMReg((encoding() << 1) + ConcreteRegisterImpl::max_gpr); } -inline VMReg XMMRegisterImpl::as_VMReg() { +inline VMReg XMMRegister::as_VMReg() const { return VMRegImpl::as_VMReg((encoding() << 3) + ConcreteRegisterImpl::max_fpr); } --- old/src/cpu/x86/vm/vtableStubs_x86_64.cpp 2014-04-24 15:53:06.000000000 -1000 +++ new/src/cpu/x86/vm/vtableStubs_x86_64.cpp 2014-04-24 15:53:06.000000000 -1000 @@ -65,7 +65,7 @@ #endif // get receiver (need to skip return address on top of stack) - assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0"); + assert(VtableStub::receiver_location() == j_rarg0.as_VMReg(), "receiver expected in j_rarg0"); // Free registers (non-args) are rax, rbx @@ -154,7 +154,7 @@ // get receiver (need to skip return address on top of stack) - assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0"); + assert(VtableStub::receiver_location() == j_rarg0.as_VMReg(), "receiver expected in j_rarg0"); // get receiver klass (also an implicit null-check) address npe_addr = __ pc(); --- old/src/cpu/x86/vm/x86.ad 2014-04-24 15:53:07.000000000 -1000 +++ new/src/cpu/x86/vm/x86.ad 2014-04-24 15:53:07.000000000 -1000 @@ -70,245 +70,245 @@ // Windows ABI: XMM6-XMM15 preserved across function calls // XMM0-XMM3 might hold parameters -reg_def XMM0 ( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()); -reg_def XMM0b( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(1)); -reg_def XMM0c( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(2)); -reg_def XMM0d( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(3)); -reg_def XMM0e( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(4)); -reg_def XMM0f( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(5)); -reg_def XMM0g( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(6)); -reg_def XMM0h( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(7)); - -reg_def XMM1 ( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()); -reg_def XMM1b( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(1)); -reg_def XMM1c( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(2)); -reg_def XMM1d( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(3)); -reg_def XMM1e( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(4)); -reg_def XMM1f( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(5)); -reg_def XMM1g( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(6)); -reg_def XMM1h( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(7)); - -reg_def XMM2 ( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()); -reg_def XMM2b( SOC, SOC, Op_RegF, 2, 
xmm2->as_VMReg()->next(1)); -reg_def XMM2c( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(2)); -reg_def XMM2d( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(3)); -reg_def XMM2e( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(4)); -reg_def XMM2f( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(5)); -reg_def XMM2g( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(6)); -reg_def XMM2h( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(7)); - -reg_def XMM3 ( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()); -reg_def XMM3b( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(1)); -reg_def XMM3c( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(2)); -reg_def XMM3d( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(3)); -reg_def XMM3e( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(4)); -reg_def XMM3f( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(5)); -reg_def XMM3g( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(6)); -reg_def XMM3h( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(7)); - -reg_def XMM4 ( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()); -reg_def XMM4b( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(1)); -reg_def XMM4c( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(2)); -reg_def XMM4d( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(3)); -reg_def XMM4e( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(4)); -reg_def XMM4f( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(5)); -reg_def XMM4g( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(6)); -reg_def XMM4h( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(7)); - -reg_def XMM5 ( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()); -reg_def XMM5b( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(1)); -reg_def XMM5c( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(2)); -reg_def XMM5d( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(3)); -reg_def XMM5e( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(4)); -reg_def XMM5f( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(5)); -reg_def XMM5g( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(6)); -reg_def XMM5h( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(7)); +reg_def XMM0 ( SOC, SOC, Op_RegF, 0, xmm0.as_VMReg()); +reg_def XMM0b( SOC, SOC, Op_RegF, 0, xmm0.as_VMReg()->next(1)); +reg_def XMM0c( SOC, SOC, Op_RegF, 0, xmm0.as_VMReg()->next(2)); +reg_def XMM0d( SOC, SOC, Op_RegF, 0, xmm0.as_VMReg()->next(3)); +reg_def XMM0e( SOC, SOC, Op_RegF, 0, xmm0.as_VMReg()->next(4)); +reg_def XMM0f( SOC, SOC, Op_RegF, 0, xmm0.as_VMReg()->next(5)); +reg_def XMM0g( SOC, SOC, Op_RegF, 0, xmm0.as_VMReg()->next(6)); +reg_def XMM0h( SOC, SOC, Op_RegF, 0, xmm0.as_VMReg()->next(7)); + +reg_def XMM1 ( SOC, SOC, Op_RegF, 1, xmm1.as_VMReg()); +reg_def XMM1b( SOC, SOC, Op_RegF, 1, xmm1.as_VMReg()->next(1)); +reg_def XMM1c( SOC, SOC, Op_RegF, 1, xmm1.as_VMReg()->next(2)); +reg_def XMM1d( SOC, SOC, Op_RegF, 1, xmm1.as_VMReg()->next(3)); +reg_def XMM1e( SOC, SOC, Op_RegF, 1, xmm1.as_VMReg()->next(4)); +reg_def XMM1f( SOC, SOC, Op_RegF, 1, xmm1.as_VMReg()->next(5)); +reg_def XMM1g( SOC, SOC, Op_RegF, 1, xmm1.as_VMReg()->next(6)); +reg_def XMM1h( SOC, SOC, Op_RegF, 1, xmm1.as_VMReg()->next(7)); + +reg_def XMM2 ( SOC, SOC, Op_RegF, 2, xmm2.as_VMReg()); +reg_def XMM2b( SOC, SOC, Op_RegF, 2, xmm2.as_VMReg()->next(1)); +reg_def XMM2c( SOC, SOC, Op_RegF, 2, xmm2.as_VMReg()->next(2)); +reg_def XMM2d( SOC, SOC, Op_RegF, 2, xmm2.as_VMReg()->next(3)); +reg_def XMM2e( SOC, SOC, Op_RegF, 2, xmm2.as_VMReg()->next(4)); +reg_def XMM2f( SOC, SOC, Op_RegF, 2, xmm2.as_VMReg()->next(5)); +reg_def XMM2g( SOC, SOC, Op_RegF, 2, xmm2.as_VMReg()->next(6)); +reg_def XMM2h( SOC, SOC, Op_RegF, 2, xmm2.as_VMReg()->next(7)); + +reg_def XMM3 ( SOC, SOC, 
Op_RegF, 3, xmm3.as_VMReg()); +reg_def XMM3b( SOC, SOC, Op_RegF, 3, xmm3.as_VMReg()->next(1)); +reg_def XMM3c( SOC, SOC, Op_RegF, 3, xmm3.as_VMReg()->next(2)); +reg_def XMM3d( SOC, SOC, Op_RegF, 3, xmm3.as_VMReg()->next(3)); +reg_def XMM3e( SOC, SOC, Op_RegF, 3, xmm3.as_VMReg()->next(4)); +reg_def XMM3f( SOC, SOC, Op_RegF, 3, xmm3.as_VMReg()->next(5)); +reg_def XMM3g( SOC, SOC, Op_RegF, 3, xmm3.as_VMReg()->next(6)); +reg_def XMM3h( SOC, SOC, Op_RegF, 3, xmm3.as_VMReg()->next(7)); + +reg_def XMM4 ( SOC, SOC, Op_RegF, 4, xmm4.as_VMReg()); +reg_def XMM4b( SOC, SOC, Op_RegF, 4, xmm4.as_VMReg()->next(1)); +reg_def XMM4c( SOC, SOC, Op_RegF, 4, xmm4.as_VMReg()->next(2)); +reg_def XMM4d( SOC, SOC, Op_RegF, 4, xmm4.as_VMReg()->next(3)); +reg_def XMM4e( SOC, SOC, Op_RegF, 4, xmm4.as_VMReg()->next(4)); +reg_def XMM4f( SOC, SOC, Op_RegF, 4, xmm4.as_VMReg()->next(5)); +reg_def XMM4g( SOC, SOC, Op_RegF, 4, xmm4.as_VMReg()->next(6)); +reg_def XMM4h( SOC, SOC, Op_RegF, 4, xmm4.as_VMReg()->next(7)); + +reg_def XMM5 ( SOC, SOC, Op_RegF, 5, xmm5.as_VMReg()); +reg_def XMM5b( SOC, SOC, Op_RegF, 5, xmm5.as_VMReg()->next(1)); +reg_def XMM5c( SOC, SOC, Op_RegF, 5, xmm5.as_VMReg()->next(2)); +reg_def XMM5d( SOC, SOC, Op_RegF, 5, xmm5.as_VMReg()->next(3)); +reg_def XMM5e( SOC, SOC, Op_RegF, 5, xmm5.as_VMReg()->next(4)); +reg_def XMM5f( SOC, SOC, Op_RegF, 5, xmm5.as_VMReg()->next(5)); +reg_def XMM5g( SOC, SOC, Op_RegF, 5, xmm5.as_VMReg()->next(6)); +reg_def XMM5h( SOC, SOC, Op_RegF, 5, xmm5.as_VMReg()->next(7)); #ifdef _WIN64 -reg_def XMM6 ( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()); -reg_def XMM6b( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(1)); -reg_def XMM6c( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(2)); -reg_def XMM6d( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(3)); -reg_def XMM6e( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(4)); -reg_def XMM6f( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(5)); -reg_def XMM6g( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(6)); -reg_def XMM6h( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(7)); - -reg_def XMM7 ( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()); -reg_def XMM7b( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(1)); -reg_def XMM7c( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(2)); -reg_def XMM7d( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(3)); -reg_def XMM7e( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(4)); -reg_def XMM7f( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(5)); -reg_def XMM7g( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(6)); -reg_def XMM7h( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(7)); - -reg_def XMM8 ( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()); -reg_def XMM8b( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(1)); -reg_def XMM8c( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(2)); -reg_def XMM8d( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(3)); -reg_def XMM8e( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(4)); -reg_def XMM8f( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(5)); -reg_def XMM8g( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(6)); -reg_def XMM8h( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(7)); - -reg_def XMM9 ( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()); -reg_def XMM9b( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(1)); -reg_def XMM9c( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(2)); -reg_def XMM9d( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(3)); -reg_def XMM9e( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(4)); -reg_def XMM9f( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(5)); -reg_def XMM9g( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(6)); -reg_def 
XMM9h( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(7)); - -reg_def XMM10 ( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()); -reg_def XMM10b( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(1)); -reg_def XMM10c( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(2)); -reg_def XMM10d( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(3)); -reg_def XMM10e( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(4)); -reg_def XMM10f( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(5)); -reg_def XMM10g( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(6)); -reg_def XMM10h( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(7)); - -reg_def XMM11 ( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()); -reg_def XMM11b( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(1)); -reg_def XMM11c( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(2)); -reg_def XMM11d( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(3)); -reg_def XMM11e( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(4)); -reg_def XMM11f( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(5)); -reg_def XMM11g( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(6)); -reg_def XMM11h( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(7)); - -reg_def XMM12 ( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()); -reg_def XMM12b( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(1)); -reg_def XMM12c( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(2)); -reg_def XMM12d( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(3)); -reg_def XMM12e( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(4)); -reg_def XMM12f( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(5)); -reg_def XMM12g( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(6)); -reg_def XMM12h( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(7)); - -reg_def XMM13 ( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()); -reg_def XMM13b( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(1)); -reg_def XMM13c( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(2)); -reg_def XMM13d( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(3)); -reg_def XMM13e( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(4)); -reg_def XMM13f( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(5)); -reg_def XMM13g( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(6)); -reg_def XMM13h( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(7)); - -reg_def XMM14 ( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()); -reg_def XMM14b( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(1)); -reg_def XMM14c( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(2)); -reg_def XMM14d( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(3)); -reg_def XMM14e( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(4)); -reg_def XMM14f( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(5)); -reg_def XMM14g( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(6)); -reg_def XMM14h( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(7)); - -reg_def XMM15 ( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()); -reg_def XMM15b( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(1)); -reg_def XMM15c( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(2)); -reg_def XMM15d( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(3)); -reg_def XMM15e( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(4)); -reg_def XMM15f( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(5)); -reg_def XMM15g( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(6)); -reg_def XMM15h( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(7)); +reg_def XMM6 ( SOC, SOE, Op_RegF, 6, xmm6.as_VMReg()); +reg_def XMM6b( SOC, SOE, Op_RegF, 6, xmm6.as_VMReg()->next(1)); +reg_def XMM6c( SOC, SOE, Op_RegF, 6, xmm6.as_VMReg()->next(2)); +reg_def XMM6d( SOC, SOE, Op_RegF, 6, 
xmm6.as_VMReg()->next(3)); +reg_def XMM6e( SOC, SOE, Op_RegF, 6, xmm6.as_VMReg()->next(4)); +reg_def XMM6f( SOC, SOE, Op_RegF, 6, xmm6.as_VMReg()->next(5)); +reg_def XMM6g( SOC, SOE, Op_RegF, 6, xmm6.as_VMReg()->next(6)); +reg_def XMM6h( SOC, SOE, Op_RegF, 6, xmm6.as_VMReg()->next(7)); + +reg_def XMM7 ( SOC, SOE, Op_RegF, 7, xmm7.as_VMReg()); +reg_def XMM7b( SOC, SOE, Op_RegF, 7, xmm7.as_VMReg()->next(1)); +reg_def XMM7c( SOC, SOE, Op_RegF, 7, xmm7.as_VMReg()->next(2)); +reg_def XMM7d( SOC, SOE, Op_RegF, 7, xmm7.as_VMReg()->next(3)); +reg_def XMM7e( SOC, SOE, Op_RegF, 7, xmm7.as_VMReg()->next(4)); +reg_def XMM7f( SOC, SOE, Op_RegF, 7, xmm7.as_VMReg()->next(5)); +reg_def XMM7g( SOC, SOE, Op_RegF, 7, xmm7.as_VMReg()->next(6)); +reg_def XMM7h( SOC, SOE, Op_RegF, 7, xmm7.as_VMReg()->next(7)); + +reg_def XMM8 ( SOC, SOE, Op_RegF, 8, xmm8.as_VMReg()); +reg_def XMM8b( SOC, SOE, Op_RegF, 8, xmm8.as_VMReg()->next(1)); +reg_def XMM8c( SOC, SOE, Op_RegF, 8, xmm8.as_VMReg()->next(2)); +reg_def XMM8d( SOC, SOE, Op_RegF, 8, xmm8.as_VMReg()->next(3)); +reg_def XMM8e( SOC, SOE, Op_RegF, 8, xmm8.as_VMReg()->next(4)); +reg_def XMM8f( SOC, SOE, Op_RegF, 8, xmm8.as_VMReg()->next(5)); +reg_def XMM8g( SOC, SOE, Op_RegF, 8, xmm8.as_VMReg()->next(6)); +reg_def XMM8h( SOC, SOE, Op_RegF, 8, xmm8.as_VMReg()->next(7)); + +reg_def XMM9 ( SOC, SOE, Op_RegF, 9, xmm9.as_VMReg()); +reg_def XMM9b( SOC, SOE, Op_RegF, 9, xmm9.as_VMReg()->next(1)); +reg_def XMM9c( SOC, SOE, Op_RegF, 9, xmm9.as_VMReg()->next(2)); +reg_def XMM9d( SOC, SOE, Op_RegF, 9, xmm9.as_VMReg()->next(3)); +reg_def XMM9e( SOC, SOE, Op_RegF, 9, xmm9.as_VMReg()->next(4)); +reg_def XMM9f( SOC, SOE, Op_RegF, 9, xmm9.as_VMReg()->next(5)); +reg_def XMM9g( SOC, SOE, Op_RegF, 9, xmm9.as_VMReg()->next(6)); +reg_def XMM9h( SOC, SOE, Op_RegF, 9, xmm9.as_VMReg()->next(7)); + +reg_def XMM10 ( SOC, SOE, Op_RegF, 10, xmm10.as_VMReg()); +reg_def XMM10b( SOC, SOE, Op_RegF, 10, xmm10.as_VMReg()->next(1)); +reg_def XMM10c( SOC, SOE, Op_RegF, 10, xmm10.as_VMReg()->next(2)); +reg_def XMM10d( SOC, SOE, Op_RegF, 10, xmm10.as_VMReg()->next(3)); +reg_def XMM10e( SOC, SOE, Op_RegF, 10, xmm10.as_VMReg()->next(4)); +reg_def XMM10f( SOC, SOE, Op_RegF, 10, xmm10.as_VMReg()->next(5)); +reg_def XMM10g( SOC, SOE, Op_RegF, 10, xmm10.as_VMReg()->next(6)); +reg_def XMM10h( SOC, SOE, Op_RegF, 10, xmm10.as_VMReg()->next(7)); + +reg_def XMM11 ( SOC, SOE, Op_RegF, 11, xmm11.as_VMReg()); +reg_def XMM11b( SOC, SOE, Op_RegF, 11, xmm11.as_VMReg()->next(1)); +reg_def XMM11c( SOC, SOE, Op_RegF, 11, xmm11.as_VMReg()->next(2)); +reg_def XMM11d( SOC, SOE, Op_RegF, 11, xmm11.as_VMReg()->next(3)); +reg_def XMM11e( SOC, SOE, Op_RegF, 11, xmm11.as_VMReg()->next(4)); +reg_def XMM11f( SOC, SOE, Op_RegF, 11, xmm11.as_VMReg()->next(5)); +reg_def XMM11g( SOC, SOE, Op_RegF, 11, xmm11.as_VMReg()->next(6)); +reg_def XMM11h( SOC, SOE, Op_RegF, 11, xmm11.as_VMReg()->next(7)); + +reg_def XMM12 ( SOC, SOE, Op_RegF, 12, xmm12.as_VMReg()); +reg_def XMM12b( SOC, SOE, Op_RegF, 12, xmm12.as_VMReg()->next(1)); +reg_def XMM12c( SOC, SOE, Op_RegF, 12, xmm12.as_VMReg()->next(2)); +reg_def XMM12d( SOC, SOE, Op_RegF, 12, xmm12.as_VMReg()->next(3)); +reg_def XMM12e( SOC, SOE, Op_RegF, 12, xmm12.as_VMReg()->next(4)); +reg_def XMM12f( SOC, SOE, Op_RegF, 12, xmm12.as_VMReg()->next(5)); +reg_def XMM12g( SOC, SOE, Op_RegF, 12, xmm12.as_VMReg()->next(6)); +reg_def XMM12h( SOC, SOE, Op_RegF, 12, xmm12.as_VMReg()->next(7)); + +reg_def XMM13 ( SOC, SOE, Op_RegF, 13, xmm13.as_VMReg()); +reg_def XMM13b( SOC, SOE, Op_RegF, 13, 
xmm13.as_VMReg()->next(1)); +reg_def XMM13c( SOC, SOE, Op_RegF, 13, xmm13.as_VMReg()->next(2)); +reg_def XMM13d( SOC, SOE, Op_RegF, 13, xmm13.as_VMReg()->next(3)); +reg_def XMM13e( SOC, SOE, Op_RegF, 13, xmm13.as_VMReg()->next(4)); +reg_def XMM13f( SOC, SOE, Op_RegF, 13, xmm13.as_VMReg()->next(5)); +reg_def XMM13g( SOC, SOE, Op_RegF, 13, xmm13.as_VMReg()->next(6)); +reg_def XMM13h( SOC, SOE, Op_RegF, 13, xmm13.as_VMReg()->next(7)); + +reg_def XMM14 ( SOC, SOE, Op_RegF, 14, xmm14.as_VMReg()); +reg_def XMM14b( SOC, SOE, Op_RegF, 14, xmm14.as_VMReg()->next(1)); +reg_def XMM14c( SOC, SOE, Op_RegF, 14, xmm14.as_VMReg()->next(2)); +reg_def XMM14d( SOC, SOE, Op_RegF, 14, xmm14.as_VMReg()->next(3)); +reg_def XMM14e( SOC, SOE, Op_RegF, 14, xmm14.as_VMReg()->next(4)); +reg_def XMM14f( SOC, SOE, Op_RegF, 14, xmm14.as_VMReg()->next(5)); +reg_def XMM14g( SOC, SOE, Op_RegF, 14, xmm14.as_VMReg()->next(6)); +reg_def XMM14h( SOC, SOE, Op_RegF, 14, xmm14.as_VMReg()->next(7)); + +reg_def XMM15 ( SOC, SOE, Op_RegF, 15, xmm15.as_VMReg()); +reg_def XMM15b( SOC, SOE, Op_RegF, 15, xmm15.as_VMReg()->next(1)); +reg_def XMM15c( SOC, SOE, Op_RegF, 15, xmm15.as_VMReg()->next(2)); +reg_def XMM15d( SOC, SOE, Op_RegF, 15, xmm15.as_VMReg()->next(3)); +reg_def XMM15e( SOC, SOE, Op_RegF, 15, xmm15.as_VMReg()->next(4)); +reg_def XMM15f( SOC, SOE, Op_RegF, 15, xmm15.as_VMReg()->next(5)); +reg_def XMM15g( SOC, SOE, Op_RegF, 15, xmm15.as_VMReg()->next(6)); +reg_def XMM15h( SOC, SOE, Op_RegF, 15, xmm15.as_VMReg()->next(7)); #else // _WIN64 -reg_def XMM6 ( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()); -reg_def XMM6b( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(1)); -reg_def XMM6c( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(2)); -reg_def XMM6d( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(3)); -reg_def XMM6e( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(4)); -reg_def XMM6f( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(5)); -reg_def XMM6g( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(6)); -reg_def XMM6h( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(7)); - -reg_def XMM7 ( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()); -reg_def XMM7b( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(1)); -reg_def XMM7c( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(2)); -reg_def XMM7d( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(3)); -reg_def XMM7e( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(4)); -reg_def XMM7f( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(5)); -reg_def XMM7g( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(6)); -reg_def XMM7h( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(7)); +reg_def XMM6 ( SOC, SOC, Op_RegF, 6, xmm6.as_VMReg()); +reg_def XMM6b( SOC, SOC, Op_RegF, 6, xmm6.as_VMReg()->next(1)); +reg_def XMM6c( SOC, SOC, Op_RegF, 6, xmm6.as_VMReg()->next(2)); +reg_def XMM6d( SOC, SOC, Op_RegF, 6, xmm6.as_VMReg()->next(3)); +reg_def XMM6e( SOC, SOC, Op_RegF, 6, xmm6.as_VMReg()->next(4)); +reg_def XMM6f( SOC, SOC, Op_RegF, 6, xmm6.as_VMReg()->next(5)); +reg_def XMM6g( SOC, SOC, Op_RegF, 6, xmm6.as_VMReg()->next(6)); +reg_def XMM6h( SOC, SOC, Op_RegF, 6, xmm6.as_VMReg()->next(7)); + +reg_def XMM7 ( SOC, SOC, Op_RegF, 7, xmm7.as_VMReg()); +reg_def XMM7b( SOC, SOC, Op_RegF, 7, xmm7.as_VMReg()->next(1)); +reg_def XMM7c( SOC, SOC, Op_RegF, 7, xmm7.as_VMReg()->next(2)); +reg_def XMM7d( SOC, SOC, Op_RegF, 7, xmm7.as_VMReg()->next(3)); +reg_def XMM7e( SOC, SOC, Op_RegF, 7, xmm7.as_VMReg()->next(4)); +reg_def XMM7f( SOC, SOC, Op_RegF, 7, xmm7.as_VMReg()->next(5)); +reg_def XMM7g( SOC, SOC, Op_RegF, 7, xmm7.as_VMReg()->next(6)); +reg_def XMM7h( SOC, SOC, 
Op_RegF, 7, xmm7.as_VMReg()->next(7)); #ifdef _LP64 -reg_def XMM8 ( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()); -reg_def XMM8b( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(1)); -reg_def XMM8c( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(2)); -reg_def XMM8d( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(3)); -reg_def XMM8e( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(4)); -reg_def XMM8f( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(5)); -reg_def XMM8g( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(6)); -reg_def XMM8h( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(7)); - -reg_def XMM9 ( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()); -reg_def XMM9b( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(1)); -reg_def XMM9c( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(2)); -reg_def XMM9d( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(3)); -reg_def XMM9e( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(4)); -reg_def XMM9f( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(5)); -reg_def XMM9g( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(6)); -reg_def XMM9h( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(7)); - -reg_def XMM10 ( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()); -reg_def XMM10b( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(1)); -reg_def XMM10c( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(2)); -reg_def XMM10d( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(3)); -reg_def XMM10e( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(4)); -reg_def XMM10f( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(5)); -reg_def XMM10g( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(6)); -reg_def XMM10h( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(7)); - -reg_def XMM11 ( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()); -reg_def XMM11b( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(1)); -reg_def XMM11c( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(2)); -reg_def XMM11d( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(3)); -reg_def XMM11e( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(4)); -reg_def XMM11f( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(5)); -reg_def XMM11g( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(6)); -reg_def XMM11h( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(7)); - -reg_def XMM12 ( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()); -reg_def XMM12b( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(1)); -reg_def XMM12c( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(2)); -reg_def XMM12d( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(3)); -reg_def XMM12e( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(4)); -reg_def XMM12f( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(5)); -reg_def XMM12g( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(6)); -reg_def XMM12h( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(7)); - -reg_def XMM13 ( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()); -reg_def XMM13b( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(1)); -reg_def XMM13c( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(2)); -reg_def XMM13d( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(3)); -reg_def XMM13e( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(4)); -reg_def XMM13f( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(5)); -reg_def XMM13g( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(6)); -reg_def XMM13h( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(7)); - -reg_def XMM14 ( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()); -reg_def XMM14b( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(1)); -reg_def XMM14c( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(2)); -reg_def XMM14d( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(3)); -reg_def 
XMM14e( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(4)); -reg_def XMM14f( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(5)); -reg_def XMM14g( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(6)); -reg_def XMM14h( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(7)); - -reg_def XMM15 ( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()); -reg_def XMM15b( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(1)); -reg_def XMM15c( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(2)); -reg_def XMM15d( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(3)); -reg_def XMM15e( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(4)); -reg_def XMM15f( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(5)); -reg_def XMM15g( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(6)); -reg_def XMM15h( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(7)); +reg_def XMM8 ( SOC, SOC, Op_RegF, 8, xmm8.as_VMReg()); +reg_def XMM8b( SOC, SOC, Op_RegF, 8, xmm8.as_VMReg()->next(1)); +reg_def XMM8c( SOC, SOC, Op_RegF, 8, xmm8.as_VMReg()->next(2)); +reg_def XMM8d( SOC, SOC, Op_RegF, 8, xmm8.as_VMReg()->next(3)); +reg_def XMM8e( SOC, SOC, Op_RegF, 8, xmm8.as_VMReg()->next(4)); +reg_def XMM8f( SOC, SOC, Op_RegF, 8, xmm8.as_VMReg()->next(5)); +reg_def XMM8g( SOC, SOC, Op_RegF, 8, xmm8.as_VMReg()->next(6)); +reg_def XMM8h( SOC, SOC, Op_RegF, 8, xmm8.as_VMReg()->next(7)); + +reg_def XMM9 ( SOC, SOC, Op_RegF, 9, xmm9.as_VMReg()); +reg_def XMM9b( SOC, SOC, Op_RegF, 9, xmm9.as_VMReg()->next(1)); +reg_def XMM9c( SOC, SOC, Op_RegF, 9, xmm9.as_VMReg()->next(2)); +reg_def XMM9d( SOC, SOC, Op_RegF, 9, xmm9.as_VMReg()->next(3)); +reg_def XMM9e( SOC, SOC, Op_RegF, 9, xmm9.as_VMReg()->next(4)); +reg_def XMM9f( SOC, SOC, Op_RegF, 9, xmm9.as_VMReg()->next(5)); +reg_def XMM9g( SOC, SOC, Op_RegF, 9, xmm9.as_VMReg()->next(6)); +reg_def XMM9h( SOC, SOC, Op_RegF, 9, xmm9.as_VMReg()->next(7)); + +reg_def XMM10 ( SOC, SOC, Op_RegF, 10, xmm10.as_VMReg()); +reg_def XMM10b( SOC, SOC, Op_RegF, 10, xmm10.as_VMReg()->next(1)); +reg_def XMM10c( SOC, SOC, Op_RegF, 10, xmm10.as_VMReg()->next(2)); +reg_def XMM10d( SOC, SOC, Op_RegF, 10, xmm10.as_VMReg()->next(3)); +reg_def XMM10e( SOC, SOC, Op_RegF, 10, xmm10.as_VMReg()->next(4)); +reg_def XMM10f( SOC, SOC, Op_RegF, 10, xmm10.as_VMReg()->next(5)); +reg_def XMM10g( SOC, SOC, Op_RegF, 10, xmm10.as_VMReg()->next(6)); +reg_def XMM10h( SOC, SOC, Op_RegF, 10, xmm10.as_VMReg()->next(7)); + +reg_def XMM11 ( SOC, SOC, Op_RegF, 11, xmm11.as_VMReg()); +reg_def XMM11b( SOC, SOC, Op_RegF, 11, xmm11.as_VMReg()->next(1)); +reg_def XMM11c( SOC, SOC, Op_RegF, 11, xmm11.as_VMReg()->next(2)); +reg_def XMM11d( SOC, SOC, Op_RegF, 11, xmm11.as_VMReg()->next(3)); +reg_def XMM11e( SOC, SOC, Op_RegF, 11, xmm11.as_VMReg()->next(4)); +reg_def XMM11f( SOC, SOC, Op_RegF, 11, xmm11.as_VMReg()->next(5)); +reg_def XMM11g( SOC, SOC, Op_RegF, 11, xmm11.as_VMReg()->next(6)); +reg_def XMM11h( SOC, SOC, Op_RegF, 11, xmm11.as_VMReg()->next(7)); + +reg_def XMM12 ( SOC, SOC, Op_RegF, 12, xmm12.as_VMReg()); +reg_def XMM12b( SOC, SOC, Op_RegF, 12, xmm12.as_VMReg()->next(1)); +reg_def XMM12c( SOC, SOC, Op_RegF, 12, xmm12.as_VMReg()->next(2)); +reg_def XMM12d( SOC, SOC, Op_RegF, 12, xmm12.as_VMReg()->next(3)); +reg_def XMM12e( SOC, SOC, Op_RegF, 12, xmm12.as_VMReg()->next(4)); +reg_def XMM12f( SOC, SOC, Op_RegF, 12, xmm12.as_VMReg()->next(5)); +reg_def XMM12g( SOC, SOC, Op_RegF, 12, xmm12.as_VMReg()->next(6)); +reg_def XMM12h( SOC, SOC, Op_RegF, 12, xmm12.as_VMReg()->next(7)); + +reg_def XMM13 ( SOC, SOC, Op_RegF, 13, xmm13.as_VMReg()); +reg_def XMM13b( SOC, SOC, Op_RegF, 13, 
xmm13.as_VMReg()->next(1)); +reg_def XMM13c( SOC, SOC, Op_RegF, 13, xmm13.as_VMReg()->next(2)); +reg_def XMM13d( SOC, SOC, Op_RegF, 13, xmm13.as_VMReg()->next(3)); +reg_def XMM13e( SOC, SOC, Op_RegF, 13, xmm13.as_VMReg()->next(4)); +reg_def XMM13f( SOC, SOC, Op_RegF, 13, xmm13.as_VMReg()->next(5)); +reg_def XMM13g( SOC, SOC, Op_RegF, 13, xmm13.as_VMReg()->next(6)); +reg_def XMM13h( SOC, SOC, Op_RegF, 13, xmm13.as_VMReg()->next(7)); + +reg_def XMM14 ( SOC, SOC, Op_RegF, 14, xmm14.as_VMReg()); +reg_def XMM14b( SOC, SOC, Op_RegF, 14, xmm14.as_VMReg()->next(1)); +reg_def XMM14c( SOC, SOC, Op_RegF, 14, xmm14.as_VMReg()->next(2)); +reg_def XMM14d( SOC, SOC, Op_RegF, 14, xmm14.as_VMReg()->next(3)); +reg_def XMM14e( SOC, SOC, Op_RegF, 14, xmm14.as_VMReg()->next(4)); +reg_def XMM14f( SOC, SOC, Op_RegF, 14, xmm14.as_VMReg()->next(5)); +reg_def XMM14g( SOC, SOC, Op_RegF, 14, xmm14.as_VMReg()->next(6)); +reg_def XMM14h( SOC, SOC, Op_RegF, 14, xmm14.as_VMReg()->next(7)); + +reg_def XMM15 ( SOC, SOC, Op_RegF, 15, xmm15.as_VMReg()); +reg_def XMM15b( SOC, SOC, Op_RegF, 15, xmm15.as_VMReg()->next(1)); +reg_def XMM15c( SOC, SOC, Op_RegF, 15, xmm15.as_VMReg()->next(2)); +reg_def XMM15d( SOC, SOC, Op_RegF, 15, xmm15.as_VMReg()->next(3)); +reg_def XMM15e( SOC, SOC, Op_RegF, 15, xmm15.as_VMReg()->next(4)); +reg_def XMM15f( SOC, SOC, Op_RegF, 15, xmm15.as_VMReg()->next(5)); +reg_def XMM15g( SOC, SOC, Op_RegF, 15, xmm15.as_VMReg()->next(6)); +reg_def XMM15h( SOC, SOC, Op_RegF, 15, xmm15.as_VMReg()->next(7)); #endif // _LP64 --- old/src/cpu/x86/vm/x86_64.ad 2014-04-24 15:53:07.000000000 -1000 +++ new/src/cpu/x86/vm/x86_64.ad 2014-04-24 15:53:07.000000000 -1000 @@ -67,66 +67,66 @@ // Turn off SOE in java-code due to frequent use of uncommon-traps. // Now that allocator is better, turn on RSI and RDI as SOE registers. 
-reg_def RAX (SOC, SOC, Op_RegI, 0, rax->as_VMReg());
-reg_def RAX_H(SOC, SOC, Op_RegI, 0, rax->as_VMReg()->next());
+reg_def RAX (SOC, SOC, Op_RegI, 0, rax.as_VMReg());
+reg_def RAX_H(SOC, SOC, Op_RegI, 0, rax.as_VMReg()->next());
-reg_def RCX (SOC, SOC, Op_RegI, 1, rcx->as_VMReg());
-reg_def RCX_H(SOC, SOC, Op_RegI, 1, rcx->as_VMReg()->next());
+reg_def RCX (SOC, SOC, Op_RegI, 1, rcx.as_VMReg());
+reg_def RCX_H(SOC, SOC, Op_RegI, 1, rcx.as_VMReg()->next());
-reg_def RDX (SOC, SOC, Op_RegI, 2, rdx->as_VMReg());
-reg_def RDX_H(SOC, SOC, Op_RegI, 2, rdx->as_VMReg()->next());
+reg_def RDX (SOC, SOC, Op_RegI, 2, rdx.as_VMReg());
+reg_def RDX_H(SOC, SOC, Op_RegI, 2, rdx.as_VMReg()->next());
-reg_def RBX (SOC, SOE, Op_RegI, 3, rbx->as_VMReg());
-reg_def RBX_H(SOC, SOE, Op_RegI, 3, rbx->as_VMReg()->next());
+reg_def RBX (SOC, SOE, Op_RegI, 3, rbx.as_VMReg());
+reg_def RBX_H(SOC, SOE, Op_RegI, 3, rbx.as_VMReg()->next());
-reg_def RSP (NS, NS, Op_RegI, 4, rsp->as_VMReg());
-reg_def RSP_H(NS, NS, Op_RegI, 4, rsp->as_VMReg()->next());
+reg_def RSP (NS, NS, Op_RegI, 4, rsp.as_VMReg());
+reg_def RSP_H(NS, NS, Op_RegI, 4, rsp.as_VMReg()->next());
// now that adapter frames are gone RBP is always saved and restored by the prolog/epilog code
-reg_def RBP (NS, SOE, Op_RegI, 5, rbp->as_VMReg());
-reg_def RBP_H(NS, SOE, Op_RegI, 5, rbp->as_VMReg()->next());
+reg_def RBP (NS, SOE, Op_RegI, 5, rbp.as_VMReg());
+reg_def RBP_H(NS, SOE, Op_RegI, 5, rbp.as_VMReg()->next());
#ifdef _WIN64
-reg_def RSI (SOC, SOE, Op_RegI, 6, rsi->as_VMReg());
-reg_def RSI_H(SOC, SOE, Op_RegI, 6, rsi->as_VMReg()->next());
+reg_def RSI (SOC, SOE, Op_RegI, 6, rsi.as_VMReg());
+reg_def RSI_H(SOC, SOE, Op_RegI, 6, rsi.as_VMReg()->next());
-reg_def RDI (SOC, SOE, Op_RegI, 7, rdi->as_VMReg());
-reg_def RDI_H(SOC, SOE, Op_RegI, 7, rdi->as_VMReg()->next());
+reg_def RDI (SOC, SOE, Op_RegI, 7, rdi.as_VMReg());
+reg_def RDI_H(SOC, SOE, Op_RegI, 7, rdi.as_VMReg()->next());
#else
-reg_def RSI (SOC, SOC, Op_RegI, 6, rsi->as_VMReg());
-reg_def RSI_H(SOC, SOC, Op_RegI, 6, rsi->as_VMReg()->next());
+reg_def RSI (SOC, SOC, Op_RegI, 6, rsi.as_VMReg());
+reg_def RSI_H(SOC, SOC, Op_RegI, 6, rsi.as_VMReg()->next());
-reg_def RDI (SOC, SOC, Op_RegI, 7, rdi->as_VMReg());
-reg_def RDI_H(SOC, SOC, Op_RegI, 7, rdi->as_VMReg()->next());
+reg_def RDI (SOC, SOC, Op_RegI, 7, rdi.as_VMReg());
+reg_def RDI_H(SOC, SOC, Op_RegI, 7, rdi.as_VMReg()->next());
#endif
-reg_def R8 (SOC, SOC, Op_RegI, 8, r8->as_VMReg());
-reg_def R8_H (SOC, SOC, Op_RegI, 8, r8->as_VMReg()->next());
+reg_def R8 (SOC, SOC, Op_RegI, 8, r8.as_VMReg());
+reg_def R8_H (SOC, SOC, Op_RegI, 8, r8.as_VMReg()->next());
-reg_def R9 (SOC, SOC, Op_RegI, 9, r9->as_VMReg());
-reg_def R9_H (SOC, SOC, Op_RegI, 9, r9->as_VMReg()->next());
+reg_def R9 (SOC, SOC, Op_RegI, 9, r9.as_VMReg());
+reg_def R9_H (SOC, SOC, Op_RegI, 9, r9.as_VMReg()->next());
-reg_def R10 (SOC, SOC, Op_RegI, 10, r10->as_VMReg());
-reg_def R10_H(SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
+reg_def R10 (SOC, SOC, Op_RegI, 10, r10.as_VMReg());
+reg_def R10_H(SOC, SOC, Op_RegI, 10, r10.as_VMReg()->next());
-reg_def R11 (SOC, SOC, Op_RegI, 11, r11->as_VMReg());
-reg_def R11_H(SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
+reg_def R11 (SOC, SOC, Op_RegI, 11, r11.as_VMReg());
+reg_def R11_H(SOC, SOC, Op_RegI, 11, r11.as_VMReg()->next());
-reg_def R12 (SOC, SOE, Op_RegI, 12, r12->as_VMReg());
-reg_def R12_H(SOC, SOE, Op_RegI, 12, r12->as_VMReg()->next());
+reg_def R12 (SOC, SOE, Op_RegI, 12, r12.as_VMReg());
+reg_def R12_H(SOC, SOE, Op_RegI, 12, r12.as_VMReg()->next());
-reg_def R13 (SOC, SOE, Op_RegI, 13, r13->as_VMReg());
-reg_def R13_H(SOC, SOE, Op_RegI, 13, r13->as_VMReg()->next());
+reg_def R13 (SOC, SOE, Op_RegI, 13, r13.as_VMReg());
+reg_def R13_H(SOC, SOE, Op_RegI, 13, r13.as_VMReg()->next());
-reg_def R14 (SOC, SOE, Op_RegI, 14, r14->as_VMReg());
-reg_def R14_H(SOC, SOE, Op_RegI, 14, r14->as_VMReg()->next());
+reg_def R14 (SOC, SOE, Op_RegI, 14, r14.as_VMReg());
+reg_def R14_H(SOC, SOE, Op_RegI, 14, r14.as_VMReg()->next());
-reg_def R15 (SOC, SOE, Op_RegI, 15, r15->as_VMReg());
-reg_def R15_H(SOC, SOE, Op_RegI, 15, r15->as_VMReg()->next());
+reg_def R15 (SOC, SOE, Op_RegI, 15, r15.as_VMReg());
+reg_def R15_H(SOC, SOE, Op_RegI, 15, r15.as_VMReg()->next());
// Floating Point Registers
--- old/src/share/vm/asm/register.hpp 2014-04-24 15:53:08.000000000 -1000
+++ new/src/share/vm/asm/register.hpp 2014-04-24 15:53:08.000000000 -1000
@@ -27,72 +27,30 @@
#include "utilities/top.hpp"
-// Use AbstractRegister as shortcut
-class AbstractRegisterImpl;
-typedef AbstractRegisterImpl* AbstractRegister;
-
-
-// The super class for platform specific registers. Instead of using value objects,
-// registers are implemented as pointers. Subclassing is used so all registers can
-// use the debugging suport below. No virtual functions are used for efficiency.
-// They are canonicalized; i.e., registers are equal if their pointers are equal,
-// and vice versa. A concrete implementation may just map the register onto 'this'.
-
-class AbstractRegisterImpl {
- protected:
-  int value() const { return (int)(intx)this; }
+/**
+ * The super class for platform specific registers.
+ */
+class AbstractRegister {
+ private:
+  int _encoding;
+
+ public:
+  AbstractRegister(int encoding) : _encoding(encoding) {}
+
+  int encoding() const {
+    return _encoding;
+  }
+
+  bool const operator==(const AbstractRegister& rhs) const {
+    return encoding() == rhs.encoding();
+  }
+
+  bool const operator!=(const AbstractRegister& rhs) const {
+    return encoding() != rhs.encoding();
+  }
};
-
-//
-// Macros for use in defining Register instances. We'd like to be
-// able to simply define const instances of the RegisterImpl* for each
-// of the registers needed on a system in a header file. However many
-// compilers don't handle this very well and end up producing a
-// private definition in every file which includes the header file.
-// Along with the static constructors necessary for initialization it
-// can consume a significant amount of space in the result library.
-//
-// The following macros allow us to declare the instance in a .hpp and
-// produce an enumeration value which has the same number. Then in a
-// .cpp the the register instance can be defined using the enumeration
-// value. This avoids the use of static constructors and multiple
-// definitions per .cpp. In addition #defines for the register can be
-// produced so that the constant registers can be inlined. These
-// macros should not be used inside other macros, because you may get
-// multiple evaluations of the macros which can give bad results.
-//
-// Here are some example uses and expansions. Note that the macro
-// invocation is terminated with a ;.
-//
-// CONSTANT_REGISTER_DECLARATION(Register, G0, 0);
-//
-// extern const Register G0 ;
-// enum { G0_RegisterEnumValue = 0 } ;
-//
-// REGISTER_DECLARATION(Register, Gmethod, G5);
-//
-// extern const Register Gmethod ;
-// enum { Gmethod_RegisterEnumValue = G5_RegisterEnumValue } ;
-//
-// REGISTER_DEFINITION(Register, G0);
-//
-// const Register G0 = ( ( Register ) G0_RegisterEnumValue ) ;
-//
-
-#define AS_REGISTER(type,name) ((type)name##_##type##EnumValue)
-
-#define CONSTANT_REGISTER_DECLARATION(type, name, value) \
-extern const type name; \
-enum { name##_##type##EnumValue = (value) }
-
-#define REGISTER_DECLARATION(type, name, value) \
-extern const type name; \
-enum { name##_##type##EnumValue = value##_##type##EnumValue }
-
-#define REGISTER_DEFINITION(type, name) \
-const type name = ((type)name##_##type##EnumValue)
-
#ifdef TARGET_ARCH_x86
# include "register_x86.hpp"
#endif
@@ -119,7 +77,7 @@
assert(
a != b,
err_msg_res("registers must be different: a=%d, b=%d",
- a, b)
+ a.encoding(), b.encoding())
);
}
@@ -133,7 +91,7 @@
a != b && a != c && b != c,
err_msg_res("registers must be different: a=%d, b=%d, c=%d",
- a, b, c)
+ a.encoding(), b.encoding(), c.encoding())
);
}
@@ -149,7 +107,7 @@
&& b != c && b != d && c != d,
err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d",
- a, b, c, d)
+ a.encoding(), b.encoding(), c.encoding(), d.encoding())
);
}
@@ -167,7 +125,7 @@
&& c != d && c != e && d != e,
err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d",
- a, b, c, d, e)
+ a.encoding(), b.encoding(), c.encoding(), d.encoding(), e.encoding())
);
}
@@ -187,7 +145,7 @@
&& d != e && d != f && e != f,
err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d",
- a, b, c, d, e, f)
+ a.encoding(), b.encoding(), c.encoding(), d.encoding(), e.encoding(), f.encoding())
);
}
@@ -209,7 +167,7 @@
&& e != f && e != g && f != g,
err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d",
- a, b, c, d, e, f, g)
+ a.encoding(), b.encoding(), c.encoding(), d.encoding(), e.encoding(), f.encoding(), g.encoding())
);
}
@@ -233,7 +191,7 @@
&& f != g && f != h && g != h,
err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d",
- a, b, c, d, e, f, g, h)
+ a.encoding(), b.encoding(), c.encoding(), d.encoding(), e.encoding(), f.encoding(), g.encoding(), h.encoding())
);
}
@@ -259,7 +217,7 @@
&& g != h && g != i && h != i,
err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d, i=%d",
- a, b, c, d, e, f, g, h, i)
+ a.encoding(), b.encoding(), c.encoding(), d.encoding(), e.encoding(), f.encoding(), g.encoding(), h.encoding(), i.encoding())
);
}
--- old/src/share/vm/c1/c1_FrameMap.cpp 2014-04-24 15:53:09.000000000 -1000
+++ new/src/share/vm/c1/c1_FrameMap.cpp 2014-04-24 15:53:09.000000000 -1000
@@ -327,7 +327,7 @@
VMReg FrameMap::regname(LIR_Opr opr) const {
if (opr->is_single_cpu()) {
assert(!opr->is_virtual(), "should not see virtual registers here");
- return opr->as_register()->as_VMReg();
+ return opr->as_register().as_VMReg();
} else if (opr->is_single_stack()) {
return sp_offset2vmreg(sp_offset_for_slot(opr->single_stack_ix()));
} else if (opr->is_address()) {
--- old/src/share/vm/c1/c1_FrameMap.hpp 2014-04-24 15:53:09.000000000 -1000
+++ new/src/share/vm/c1/c1_FrameMap.hpp 2014-04-24 15:53:09.000000000 -1000
@@ -128,15 +128,15 @@
static int cpu_reg2rnr (Register reg) {
assert(_init_done, "tables not initialized");
- debug_only(cpu_range_check(reg->encoding());)
- return _cpu_reg2rnr[reg->encoding()];
+ debug_only(cpu_range_check(reg.encoding());)
+ return _cpu_reg2rnr[reg.encoding()];
}
static void map_register(int rnr, Register reg) {
debug_only(cpu_range_check(rnr);)
- debug_only(cpu_range_check(reg->encoding());)
+ debug_only(cpu_range_check(reg.encoding());)
_cpu_rnr2reg[rnr] = reg;
- _cpu_reg2rnr[reg->encoding()] = rnr;
+ _cpu_reg2rnr[reg.encoding()] = rnr;
}
void update_reserved_argument_area_size (int size) {
--- old/src/share/vm/c1/c1_LIR.cpp 2014-04-24 15:53:10.000000000 -1000
+++ new/src/share/vm/c1/c1_LIR.cpp 2014-04-24 15:53:10.000000000 -1000
@@ -1563,15 +1563,15 @@
} else if (is_virtual()) {
out->print("R%d", vreg_number());
} else if (is_single_cpu()) {
- out->print(as_register()->name());
+ out->print(as_register().name());
} else if (is_double_cpu()) {
- out->print(as_register_hi()->name());
- out->print(as_register_lo()->name());
+ out->print(as_register_hi().name());
+ out->print(as_register_lo().name());
#if defined(X86)
} else if (is_single_xmm()) {
- out->print(as_xmm_float_reg()->name());
+ out->print(as_xmm_float_reg().name());
} else if (is_double_xmm()) {
- out->print(as_xmm_double_reg()->name());
+ out->print(as_xmm_double_reg().name());
} else if (is_single_fpu()) {
out->print("fpu%d", fpu_regnr());
} else if (is_double_fpu()) {
@@ -1583,9 +1583,9 @@
out->print("d%d", fpu_regnrLo() >> 1);
#else
} else if (is_single_fpu()) {
- out->print(as_float_reg()->name());
+ out->print(as_float_reg().name());
} else if (is_double_fpu()) {
- out->print(as_double_reg()->name());
+ out->print(as_double_reg().name());
#endif
} else if (is_illegal()) {
--- old/src/share/vm/c1/c1_LIRAssembler.cpp 2014-04-24 15:53:10.000000000 -1000
+++ new/src/share/vm/c1/c1_LIRAssembler.cpp 2014-04-24 15:53:10.000000000 -1000
@@ -873,7 +873,7 @@
VMReg r = v.reg();
if (!r->is_stack()) {
stringStream st;
- st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
+ st.print("bad oop %s at %d", r->as_Register().name(), _masm->offset());
#ifdef SPARC
_masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
--- old/src/share/vm/c1/c1_LinearScan.cpp 2014-04-24 15:53:11.000000000 -1000
+++ new/src/share/vm/c1/c1_LinearScan.cpp 2014-04-24 15:53:11.000000000 -1000
@@ -2614,7 +2614,7 @@
#ifdef X86
} else if (opr->is_single_xmm()) {
- VMReg rname = opr->as_xmm_float_reg()->as_VMReg();
+ VMReg rname = opr->as_xmm_float_reg().as_VMReg();
LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname));
scope_values->append(sv);
@@ -2683,12 +2683,12 @@
} else if (opr->is_double_cpu()) {
#ifdef _LP64
- VMReg rname_first = opr->as_register_lo()->as_VMReg();
+ VMReg rname_first = opr->as_register_lo().as_VMReg();
first = new LocationValue(Location::new_reg_loc(Location::lng, rname_first));
second = _int_0_scope_value;
#else
- VMReg rname_first = opr->as_register_lo()->as_VMReg();
- VMReg rname_second = opr->as_register_hi()->as_VMReg();
+ VMReg rname_first = opr->as_register_lo().as_VMReg();
+ VMReg rname_second = opr->as_register_hi().as_VMReg();
if (hi_word_offset_in_bytes < lo_word_offset_in_bytes) {
// lo/hi and swapped relative to first and second, so swap them
@@ -2705,7 +2705,7 @@
#ifdef X86
} else if (opr->is_double_xmm()) {
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
- VMReg rname_first = opr->as_xmm_double_reg()->as_VMReg();
+ VMReg rname_first = opr->as_xmm_double_reg().as_VMReg();
# ifdef _LP64
first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
second = _int_0_scope_value;
--- old/src/share/vm/runtime/vmStructs.cpp 2014-04-24 15:53:12.000000000 -1000
+++ new/src/share/vm/runtime/vmStructs.cpp 2014-04-24 15:53:12.000000000 -1000
@@ -2586,7 +2586,7 @@
/* Calling convention constants */ \
/********************************/ \
\
- declare_constant(RegisterImpl::number_of_registers) \
+ declare_constant(Register::number_of_registers) \
declare_constant(ConcreteRegisterImpl::number_of_registers) \
declare_preprocessor_constant("REG_COUNT", REG_COUNT) \
declare_c2_preprocessor_constant("SAVED_ON_ENTRY_REG_COUNT", SAVED_ON_ENTRY_REG_COUNT) \
--- old/src/cpu/x86/vm/register_definitions_x86.cpp 2014-04-24 15:53:13.000000000 -1000
+++ /dev/null 2014-04-24 15:53:13.000000000 -1000
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "asm/register.hpp"
-#include "register_x86.hpp"
-#ifdef TARGET_ARCH_x86
-# include "interp_masm_x86.hpp"
-#endif
-
-REGISTER_DEFINITION(Register, noreg);
-REGISTER_DEFINITION(Register, rax);
-REGISTER_DEFINITION(Register, rcx);
-REGISTER_DEFINITION(Register, rdx);
-REGISTER_DEFINITION(Register, rbx);
-REGISTER_DEFINITION(Register, rsp);
-REGISTER_DEFINITION(Register, rbp);
-REGISTER_DEFINITION(Register, rsi);
-REGISTER_DEFINITION(Register, rdi);
-#ifdef AMD64
-REGISTER_DEFINITION(Register, r8);
-REGISTER_DEFINITION(Register, r9);
-REGISTER_DEFINITION(Register, r10);
-REGISTER_DEFINITION(Register, r11);
-REGISTER_DEFINITION(Register, r12);
-REGISTER_DEFINITION(Register, r13);
-REGISTER_DEFINITION(Register, r14);
-REGISTER_DEFINITION(Register, r15);
-#endif // AMD64
-
-REGISTER_DEFINITION(XMMRegister, xnoreg);
-REGISTER_DEFINITION(XMMRegister, xmm0 );
-REGISTER_DEFINITION(XMMRegister, xmm1 );
-REGISTER_DEFINITION(XMMRegister, xmm2 );
-REGISTER_DEFINITION(XMMRegister, xmm3 );
-REGISTER_DEFINITION(XMMRegister, xmm4 );
-REGISTER_DEFINITION(XMMRegister, xmm5 );
-REGISTER_DEFINITION(XMMRegister, xmm6 );
-REGISTER_DEFINITION(XMMRegister, xmm7 );
-#ifdef AMD64
-REGISTER_DEFINITION(XMMRegister, xmm8);
-REGISTER_DEFINITION(XMMRegister, xmm9);
-REGISTER_DEFINITION(XMMRegister, xmm10);
-REGISTER_DEFINITION(XMMRegister, xmm11);
-REGISTER_DEFINITION(XMMRegister, xmm12);
-REGISTER_DEFINITION(XMMRegister, xmm13);
-REGISTER_DEFINITION(XMMRegister, xmm14);
-REGISTER_DEFINITION(XMMRegister, xmm15);
-
-REGISTER_DEFINITION(Register, c_rarg0);
-REGISTER_DEFINITION(Register, c_rarg1);
-REGISTER_DEFINITION(Register, c_rarg2);
-REGISTER_DEFINITION(Register, c_rarg3);
-
-REGISTER_DEFINITION(XMMRegister, c_farg0);
-REGISTER_DEFINITION(XMMRegister, c_farg1);
-REGISTER_DEFINITION(XMMRegister, c_farg2);
-REGISTER_DEFINITION(XMMRegister, c_farg3);
-
-// Non windows OS's have a few more argument registers
-#ifndef _WIN64
-REGISTER_DEFINITION(Register, c_rarg4);
-REGISTER_DEFINITION(Register, c_rarg5);
-
-REGISTER_DEFINITION(XMMRegister, c_farg4);
-REGISTER_DEFINITION(XMMRegister, c_farg5);
-REGISTER_DEFINITION(XMMRegister, c_farg6);
-REGISTER_DEFINITION(XMMRegister, c_farg7);
-#endif /* _WIN64 */
-
-REGISTER_DEFINITION(Register, j_rarg0);
-REGISTER_DEFINITION(Register, j_rarg1);
-REGISTER_DEFINITION(Register, j_rarg2);
-REGISTER_DEFINITION(Register, j_rarg3);
-REGISTER_DEFINITION(Register, j_rarg4);
-REGISTER_DEFINITION(Register, j_rarg5);
-
-REGISTER_DEFINITION(XMMRegister, j_farg0);
-REGISTER_DEFINITION(XMMRegister, j_farg1);
-REGISTER_DEFINITION(XMMRegister, j_farg2);
-REGISTER_DEFINITION(XMMRegister, j_farg3);
-REGISTER_DEFINITION(XMMRegister, j_farg4);
-REGISTER_DEFINITION(XMMRegister, j_farg5);
-REGISTER_DEFINITION(XMMRegister, j_farg6);
-REGISTER_DEFINITION(XMMRegister, j_farg7);
-
-REGISTER_DEFINITION(Register, rscratch1);
-REGISTER_DEFINITION(Register, rscratch2);
-
-REGISTER_DEFINITION(Register, r12_heapbase);
-REGISTER_DEFINITION(Register, r15_thread);
-#endif // AMD64
-
-REGISTER_DEFINITION(MMXRegister, mnoreg );
-REGISTER_DEFINITION(MMXRegister, mmx0 );
-REGISTER_DEFINITION(MMXRegister, mmx1 );
-REGISTER_DEFINITION(MMXRegister, mmx2 );
-REGISTER_DEFINITION(MMXRegister, mmx3 );
-REGISTER_DEFINITION(MMXRegister, mmx4 );
-REGISTER_DEFINITION(MMXRegister, mmx5 );
-REGISTER_DEFINITION(MMXRegister, mmx6 );
-REGISTER_DEFINITION(MMXRegister, mmx7 );
-
-// JSR 292
-REGISTER_DEFINITION(Register, rbp_mh_SP_save);
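
Reviewer note (not part of the patch): deleting register_definitions_x86.cpp removes the last users of the REGISTER_DEFINITION/CONSTANT_REGISTER_DECLARATION machinery, because a register is now a small value object that carries its own encoding. The new register_x86.hpp is not included in this excerpt, so the sketch below is only an illustration of the direction the change points in; the Register subclass, its is_valid() helper and the constant definitions are assumptions, with only AbstractRegister and the Register::number_of_registers name taken from the hunks above.

// Illustrative sketch, assuming AbstractRegister as added in register.hpp above.
// Not the actual register_x86.hpp from this webrev.
class Register : public AbstractRegister {
 public:
  enum { number_of_registers = 16 };  // name referenced from the vmStructs hunk; 16 is the x86_64 GPR count
  explicit Register(int encoding = -1) : AbstractRegister(encoding) {}
  bool is_valid() const { return 0 <= encoding() && encoding() < number_of_registers; }
};

// With value semantics the per-platform registers can simply be const objects
// in the header; no static-constructor or enum indirection is needed.
const Register noreg(-1);
const Register rax(0);
const Register rcx(1);
const Register rdx(2);
const Register rbx(3);
const Register rsp(4);
const Register rbp(5);
const Register rsi(6);
const Register rdi(7);

Callers then write rax.encoding(), rax.as_VMReg() and so on, and equality compares encodings, which is exactly the arrow-to-dot rewrite and the a.encoding() assertion messages visible throughout the hunks above.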