# HG changeset patch # User Monica Beckwith # Date 1593011846 -7200 # Wed Jun 24 17:17:26 2020 +0200 # Node ID b54b2d78f85658725248c3b94e520e36c499b7ad # Parent ab0b5603485256398d3c37d5d68913747c52c4a3 8248238: Adding Windows support to OpenJDK on AArch64 Summary: LP64 vs LLP64 changes to add Windows support Contributed-by: Monica Beckwith , Ludovic Henry Reviewed-by: diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad --- a/src/hotspot/cpu/aarch64/aarch64.ad +++ b/src/hotspot/cpu/aarch64/aarch64.ad @@ -1626,7 +1626,7 @@ C2_MacroAssembler _masm(&cbuf); // n.b. frame size includes space for return pc and rfp - const long framesize = C->output()->frame_size_in_bytes(); + const int64_t framesize = C->output()->frame_size_in_bytes(); assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment"); // insert a nop at the start of the prolog so we can patch in a @@ -3118,7 +3118,7 @@ enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{ C2_MacroAssembler _masm(&cbuf); - u_int32_t con = (u_int32_t)$src$$constant; + uint32_t con = (uint32_t)$src$$constant; Register dst_reg = as_Register($dst$$reg); if (con == 0) { __ movw(dst_reg, zr); @@ -3130,7 +3130,7 @@ enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{ C2_MacroAssembler _masm(&cbuf); Register dst_reg = as_Register($dst$$reg); - u_int64_t con = (u_int64_t)$src$$constant; + uint64_t con = (uint64_t)$src$$constant; if (con == 0) { __ mov(dst_reg, zr); } else { @@ -3155,7 +3155,7 @@ if (con < (address)(uintptr_t)os::vm_page_size()) { __ mov(dst_reg, con); } else { - unsigned long offset; + uint64_t offset; __ adrp(dst_reg, con, offset); __ add(dst_reg, dst_reg, offset); } @@ -3172,7 +3172,7 @@ enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{ C2_MacroAssembler _masm(&cbuf); Register dst_reg = as_Register($dst$$reg); - __ mov(dst_reg, (u_int64_t)1); + __ mov(dst_reg, (uint64_t)1); %} enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{ @@ -3297,7 +3297,7 @@ enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{ C2_MacroAssembler _masm(&cbuf); Register reg1 = as_Register($src1$$reg); - u_int32_t val = (u_int32_t)$src2$$constant; + uint32_t val = (uint32_t)$src2$$constant; __ movw(rscratch1, val); __ cmpw(reg1, rscratch1); %} @@ -3319,7 +3319,7 @@ __ adds(zr, reg, -val); } else { // aargh, Long.MIN_VALUE is a special case - __ orr(rscratch1, zr, (u_int64_t)val); + __ orr(rscratch1, zr, (uint64_t)val); __ subs(zr, reg, rscratch1); } %} @@ -3327,7 +3327,7 @@ enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{ C2_MacroAssembler _masm(&cbuf); Register reg1 = as_Register($src1$$reg); - u_int64_t val = (u_int64_t)$src2$$constant; + uint64_t val = (uint64_t)$src2$$constant; __ mov(rscratch1, val); __ cmp(reg1, rscratch1); %} @@ -4227,7 +4227,7 @@ // 32 bit integer valid for add sub immediate operand immIAddSub() %{ - predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int())); + predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int())); match(ConI); op_cost(0); format %{ %} @@ -4238,7 +4238,7 @@ // TODO -- check this is right when e.g the mask is 0x80000000 operand immILog() %{ - predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int())); + predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int())); match(ConI); op_cost(0); @@ -4316,7 +4316,7 @@ // 64 bit integer valid for logical immediate operand immLLog() %{ - 
predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long())); + predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long())); match(ConL); op_cost(0); format %{ %} @@ -7112,7 +7112,7 @@ match(Set dst src); ins_cost(INSN_COST); - format %{ "mov $dst, $src\t# long" %} + format %{ "mov $dst, $src\t# int64_t" %} ins_encode( aarch64_enc_mov_imm(dst, src) ); @@ -8248,7 +8248,7 @@ match(Set dst (CastX2P src)); ins_cost(INSN_COST); - format %{ "mov $dst, $src\t# long -> ptr" %} + format %{ "mov $dst, $src\t# int64_t -> ptr" %} ins_encode %{ if ($dst$$reg != $src$$reg) { @@ -8263,7 +8263,7 @@ match(Set dst (CastP2X src)); ins_cost(INSN_COST); - format %{ "mov $dst, $src\t# ptr -> long" %} + format %{ "mov $dst, $src\t# ptr -> int64_t" %} ins_encode %{ if ($dst$$reg != $src$$reg) { @@ -8601,7 +8601,7 @@ effect(KILL cr); format %{ - "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval" + "cmpxchg $mem, $oldval, $newval\t# (int64_t) if $mem == $oldval then $mem <-- $newval" "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)" %} @@ -8716,7 +8716,7 @@ effect(KILL cr); format %{ - "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval" + "cmpxchg_acq $mem, $oldval, $newval\t# (int64_t) if $mem == $oldval then $mem <-- $newval" "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)" %} @@ -8833,7 +8833,7 @@ ins_cost(2 * VOLATILE_REF_COST); effect(TEMP_DEF res, KILL cr); format %{ - "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval" + "cmpxchg $res = $mem, $oldval, $newval\t# (int64_t, weak) if $mem == $oldval then $mem <-- $newval" %} ins_encode %{ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, @@ -8931,7 +8931,7 @@ ins_cost(VOLATILE_REF_COST); effect(TEMP_DEF res, KILL cr); format %{ - "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval" + "cmpxchg_acq $res = $mem, $oldval, $newval\t# (int64_t, weak) if $mem == $oldval then $mem <-- $newval" %} ins_encode %{ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, @@ -9030,7 +9030,7 @@ ins_cost(2 * VOLATILE_REF_COST); effect(KILL cr); format %{ - "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval" + "cmpxchg $res = $mem, $oldval, $newval\t# (int64_t, weak) if $mem == $oldval then $mem <-- $newval" "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)" %} ins_encode %{ @@ -9137,7 +9137,7 @@ ins_cost(VOLATILE_REF_COST); effect(KILL cr); format %{ - "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval" + "cmpxchg_acq $res = $mem, $oldval, $newval\t# (int64_t, weak) if $mem == $oldval then $mem <-- $newval" "csetw $res, EQ\t# $res <-- (EQ ? 
1 : 0)" %} ins_encode %{ @@ -9659,7 +9659,7 @@ match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2))); ins_cost(INSN_COST * 2); - format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long" %} + format %{ "csel $dst, $src2, $src1 $cmp\t# signed, int64_t" %} ins_encode %{ __ csel(as_Register($dst$$reg), @@ -9675,7 +9675,7 @@ match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2))); ins_cost(INSN_COST * 2); - format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long" %} + format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, int64_t" %} ins_encode %{ __ csel(as_Register($dst$$reg), @@ -9693,7 +9693,7 @@ match(Set dst (CMoveL (Binary cmp cr) (Binary src zero))); ins_cost(INSN_COST * 2); - format %{ "csel $dst, zr, $src $cmp\t# signed, long" %} + format %{ "csel $dst, zr, $src $cmp\t# signed, int64_t" %} ins_encode %{ __ csel(as_Register($dst$$reg), @@ -9709,7 +9709,7 @@ match(Set dst (CMoveL (Binary cmp cr) (Binary src zero))); ins_cost(INSN_COST * 2); - format %{ "csel $dst, zr, $src $cmp\t# unsigned, long" %} + format %{ "csel $dst, zr, $src $cmp\t# unsigned, int64_t" %} ins_encode %{ __ csel(as_Register($dst$$reg), @@ -9725,7 +9725,7 @@ match(Set dst (CMoveL (Binary cmp cr) (Binary zero src))); ins_cost(INSN_COST * 2); - format %{ "csel $dst, $src, zr $cmp\t# signed, long" %} + format %{ "csel $dst, $src, zr $cmp\t# signed, int64_t" %} ins_encode %{ __ csel(as_Register($dst$$reg), @@ -9741,7 +9741,7 @@ match(Set dst (CMoveL (Binary cmp cr) (Binary zero src))); ins_cost(INSN_COST * 2); - format %{ "csel $dst, $src, zr $cmp\t# unsigned, long" %} + format %{ "csel $dst, $src, zr $cmp\t# unsigned, int64_t" %} ins_encode %{ __ csel(as_Register($dst$$reg), @@ -10286,7 +10286,7 @@ match(Set dst (SubL zero src)); ins_cost(INSN_COST); - format %{ "neg $dst, $src\t# long" %} + format %{ "neg $dst, $src\t# int64_t" %} ins_encode %{ __ neg(as_Register($dst$$reg), @@ -11920,7 +11920,7 @@ format %{ "ubfxw $dst, $src, $rshift, $mask" %} ins_encode %{ int rshift = $rshift$$constant & 31; - long mask = $mask$$constant; + int64_t mask = $mask$$constant; int width = exact_log2(mask+1); __ ubfxw(as_Register($dst$$reg), as_Register($src$$reg), rshift, width); @@ -11937,7 +11937,7 @@ format %{ "ubfx $dst, $src, $rshift, $mask" %} ins_encode %{ int rshift = $rshift$$constant & 63; - long mask = $mask$$constant; + int64_t mask = $mask$$constant; int width = exact_log2_long(mask+1); __ ubfx(as_Register($dst$$reg), as_Register($src$$reg), rshift, width); @@ -11957,7 +11957,7 @@ format %{ "ubfx $dst, $src, $rshift, $mask" %} ins_encode %{ int rshift = $rshift$$constant & 31; - long mask = $mask$$constant; + int64_t mask = $mask$$constant; int width = exact_log2(mask+1); __ ubfx(as_Register($dst$$reg), as_Register($src$$reg), rshift, width); @@ -11976,7 +11976,7 @@ format %{ "ubfizw $dst, $src, $lshift, $mask" %} ins_encode %{ int lshift = $lshift$$constant & 31; - long mask = $mask$$constant; + int64_t mask = $mask$$constant; int width = exact_log2(mask+1); __ ubfizw(as_Register($dst$$reg), as_Register($src$$reg), lshift, width); @@ -11994,7 +11994,7 @@ format %{ "ubfiz $dst, $src, $lshift, $mask" %} ins_encode %{ int lshift = $lshift$$constant & 63; - long mask = $mask$$constant; + int64_t mask = $mask$$constant; int width = exact_log2_long(mask+1); __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), lshift, width); @@ -12012,7 +12012,7 @@ format %{ "ubfiz $dst, $src, $lshift, $mask" %} ins_encode %{ int lshift = $lshift$$constant & 63; - long mask = $mask$$constant; + int64_t mask = $mask$$constant; int 
width = exact_log2(mask+1); __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), lshift, width); @@ -13249,7 +13249,7 @@ ins_encode %{ __ andw(as_Register($dst$$reg), as_Register($src1$$reg), - (unsigned long)($src2$$constant)); + (uint64_t)($src2$$constant)); %} ins_pipe(ialu_reg_imm); @@ -13281,7 +13281,7 @@ ins_encode %{ __ orrw(as_Register($dst$$reg), as_Register($src1$$reg), - (unsigned long)($src2$$constant)); + (uint64_t)($src2$$constant)); %} ins_pipe(ialu_reg_imm); @@ -13313,7 +13313,7 @@ ins_encode %{ __ eorw(as_Register($dst$$reg), as_Register($src1$$reg), - (unsigned long)($src2$$constant)); + (uint64_t)($src2$$constant)); %} ins_pipe(ialu_reg_imm); @@ -13346,7 +13346,7 @@ ins_encode %{ __ andr(as_Register($dst$$reg), as_Register($src1$$reg), - (unsigned long)($src2$$constant)); + (uint64_t)($src2$$constant)); %} ins_pipe(ialu_reg_imm); @@ -13378,7 +13378,7 @@ ins_encode %{ __ orr(as_Register($dst$$reg), as_Register($src1$$reg), - (unsigned long)($src2$$constant)); + (uint64_t)($src2$$constant)); %} ins_pipe(ialu_reg_imm); @@ -13410,7 +13410,7 @@ ins_encode %{ __ eor(as_Register($dst$$reg), as_Register($src1$$reg), - (unsigned long)($src2$$constant)); + (uint64_t)($src2$$constant)); %} ins_pipe(ialu_reg_imm); @@ -13859,8 +13859,8 @@ instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr) %{ - predicate((u_int64_t)n->in(2)->get_long() - < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord)); + predicate((uint64_t)n->in(2)->get_long() + < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord)); match(Set dummy (ClearArray cnt base)); effect(USE_KILL base); @@ -13868,7 +13868,7 @@ format %{ "ClearArray $cnt, $base" %} ins_encode %{ - __ zero_words($base$$Register, (u_int64_t)$cnt$$constant); + __ zero_words($base$$Register, (uint64_t)$cnt$$constant); %} ins_pipe(pipe_class_memory); @@ -13907,7 +13907,7 @@ %{ match(Set cr (OverflowAddL op1 op2)); - format %{ "cmn $op1, $op2\t# overflow check long" %} + format %{ "cmn $op1, $op2\t# overflow check int64_t" %} ins_cost(INSN_COST); ins_encode %{ __ cmn($op1$$Register, $op2$$Register); @@ -13920,7 +13920,7 @@ %{ match(Set cr (OverflowAddL op1 op2)); - format %{ "cmn $op1, $op2\t# overflow check long" %} + format %{ "cmn $op1, $op2\t# overflow check int64_t" %} ins_cost(INSN_COST); ins_encode %{ __ cmn($op1$$Register, $op2$$constant); @@ -13959,7 +13959,7 @@ %{ match(Set cr (OverflowSubL op1 op2)); - format %{ "cmp $op1, $op2\t# overflow check long" %} + format %{ "cmp $op1, $op2\t# overflow check int64_t" %} ins_cost(INSN_COST); ins_encode %{ __ cmp($op1$$Register, $op2$$Register); @@ -13972,7 +13972,7 @@ %{ match(Set cr (OverflowSubL op1 op2)); - format %{ "cmp $op1, $op2\t# overflow check long" %} + format %{ "cmp $op1, $op2\t# overflow check int64_t" %} ins_cost(INSN_COST); ins_encode %{ __ subs(zr, $op1$$Register, $op2$$constant); @@ -13998,7 +13998,7 @@ %{ match(Set cr (OverflowSubL zero op1)); - format %{ "cmp zr, $op1\t# overflow check long" %} + format %{ "cmp zr, $op1\t# overflow check int64_t" %} ins_cost(INSN_COST); ins_encode %{ __ cmp(zr, $op1$$Register); @@ -14054,7 +14054,7 @@ %{ match(Set cr (OverflowMulL op1 op2)); - format %{ "mul rscratch1, $op1, $op2\t#overflow check long\n\t" + format %{ "mul rscratch1, $op1, $op2\t#overflow check int64_t\n\t" "smulh rscratch2, $op1, $op2\n\t" "cmp rscratch2, rscratch1, ASR #63\n\t" "movw rscratch1, #0x80000000\n\t" @@ -14080,7 +14080,7 @@ || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow); effect(USE labl, KILL cr); - format %{ "mul 
rscratch1, $op1, $op2\t#overflow check long\n\t" + format %{ "mul rscratch1, $op1, $op2\t#overflow check int64_t\n\t" "smulh rscratch2, $op1, $op2\n\t" "cmp rscratch2, rscratch1, ASR #63\n\t" "b$cmp $labl" %} @@ -14844,7 +14844,7 @@ effect(USE labl); ins_cost(BRANCH_COST); - format %{ "cb$cmp $op1, $labl # long" %} + format %{ "cb$cmp $op1, $labl # int64_t" %} ins_encode %{ Label* L = $labl$$label; Assembler::Condition cond = @@ -14911,7 +14911,7 @@ effect(USE labl); ins_cost(BRANCH_COST); - format %{ "cb$cmp $op1, $labl # long" %} + format %{ "cb$cmp $op1, $labl # int64_t" %} ins_encode %{ Label* L = $labl$$label; Assembler::Condition cond = @@ -14976,7 +14976,7 @@ (/*is_32*/false, n->in(1)->in(2)->get_long())); ins_cost(INSN_COST); - format %{ "tst $op1, $op2 # long" %} + format %{ "tst $op1, $op2 # int64_t" %} ins_encode %{ __ tst($op1$$Register, $op2$$constant); %} @@ -15000,7 +15000,7 @@ match(Set cr (CmpL (AndL op1 op2) op3)); ins_cost(INSN_COST); - format %{ "tst $op1, $op2 # long" %} + format %{ "tst $op1, $op2 # int64_t" %} ins_encode %{ __ tst($op1$$Register, $op2$$Register); %} diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.cpp b/src/hotspot/cpu/aarch64/assembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/assembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/assembler_aarch64.cpp @@ -31,7 +31,7 @@ #include "interpreter/interpreter.hpp" #ifndef PRODUCT -const unsigned long Assembler::asm_bp = 0x00007fffee09ac88; +const uint64_t Assembler::asm_bp = 0x00007fffee09ac88; #endif #include "compiler/disassembler.hpp" @@ -1493,7 +1493,7 @@ Disassembler::decode((address)start, (address)start + len); } - JNIEXPORT void das1(unsigned long insn) { + JNIEXPORT void das1(uint64_t insn) { das(insn, 1); } } @@ -1532,7 +1532,7 @@ } } -void Assembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) { +void Assembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) { ShouldNotReachHere(); } @@ -1541,7 +1541,7 @@ #define starti Instruction_aarch64 do_not_use(this); set_current(&do_not_use) void Assembler::adr(Register Rd, address adr) { - long offset = adr - pc(); + int64_t offset = adr - pc(); int offset_lo = offset & 3; offset >>= 2; starti; @@ -1552,7 +1552,7 @@ void Assembler::_adrp(Register Rd, address adr) { uint64_t pc_page = (uint64_t)pc() >> 12; uint64_t adr_page = (uint64_t)adr >> 12; - long offset = adr_page - pc_page; + int64_t offset = adr_page - pc_page; int offset_lo = offset & 3; offset >>= 2; starti; @@ -1701,9 +1701,9 @@ srf(Rn, 5); } -bool Assembler::operand_valid_for_add_sub_immediate(long imm) { +bool Assembler::operand_valid_for_add_sub_immediate(int64_t imm) { bool shift = false; - unsigned long uimm = uabs(imm); + uint64_t uimm = uabs(imm); if (uimm < (1 << 12)) return true; if (uimm < (1 << 24) diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp --- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp @@ -199,7 +199,7 @@ return extend(uval, msb - lsb); } - static void patch(address a, int msb, int lsb, unsigned long val) { + static void patch(address a, int msb, int lsb, uint64_t val) { int nbits = msb - lsb + 1; guarantee(val < (1U << nbits), "Field too big for insn"); assert_cond(msb >= lsb); @@ -212,9 +212,9 @@ *(unsigned *)a = target; } - static void spatch(address a, int msb, int lsb, long val) { + static void spatch(address a, int msb, int lsb, int64_t val) { int nbits = msb - lsb + 1; - long chk = val >> (nbits - 1); + int64_t 
chk = val >> (nbits - 1); guarantee (chk == -1 || chk == 0, "Field too big for insn"); unsigned uval = val; unsigned mask = (1U << nbits) - 1; @@ -245,9 +245,9 @@ f(val, bit, bit); } - void sf(long val, int msb, int lsb) { + void sf(int64_t val, int msb, int lsb) { int nbits = msb - lsb + 1; - long chk = val >> (nbits - 1); + int64_t chk = val >> (nbits - 1); guarantee (chk == -1 || chk == 0, "Field too big for insn"); unsigned uval = val; unsigned mask = (1U << nbits) - 1; @@ -357,7 +357,7 @@ private: Register _base; Register _index; - long _offset; + int64_t _offset; enum mode _mode; extend _ext; @@ -380,9 +380,9 @@ : _base(r), _index(noreg), _offset(0), _mode(base_plus_offset), _target(0) { } Address(Register r, int o) : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { } - Address(Register r, long o) + Address(Register r, int64_t o) : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { } - Address(Register r, unsigned long o) + Address(Register r, uint64_t o) : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { } #ifdef ASSERT Address(Register r, ByteSize disp) @@ -422,7 +422,7 @@ "wrong mode"); return _base; } - long offset() const { + int64_t offset() const { return _offset; } Register index() const { @@ -554,7 +554,7 @@ void lea(MacroAssembler *, Register) const; - static bool offset_ok_for_immed(long offset, int shift) { + static bool offset_ok_for_immed(int64_t offset, int shift) { unsigned mask = (1 << shift) - 1; if (offset < 0 || offset & mask) { return (uabs(offset) < (1 << (20 - 12))); // Unscaled offset @@ -616,10 +616,10 @@ class Assembler : public AbstractAssembler { #ifndef PRODUCT - static const unsigned long asm_bp; + static const uint64_t asm_bp; void emit_long(jint x) { - if ((unsigned long)pc() == asm_bp) + if ((uint64_t)pc() == asm_bp) asm volatile ("nop"); AbstractAssembler::emit_int32(x); } @@ -670,7 +670,7 @@ void f(unsigned val, int msb) { current->f(val, msb, msb); } - void sf(long val, int msb, int lsb) { + void sf(int64_t val, int msb, int lsb) { current->sf(val, msb, lsb); } void rf(Register reg, int lsb) { @@ -720,7 +720,7 @@ wrap_label(Rd, L, &Assembler::_adrp); } - void adrp(Register Rd, const Address &dest, unsigned long &offset); + void adrp(Register Rd, const Address &dest, uint64_t &offset); #undef INSN @@ -846,7 +846,7 @@ // architecture. In debug mode we shrink it in order to test // trampolines, but not so small that branches in the interpreter // are out of range. 
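The spatch()/sf() hunks above share one guard: a value fits an nbits-wide signed field exactly when arithmetically shifting away the low nbits - 1 bits leaves all zeros or all ones. A minimal standalone sketch of that check follows (hypothetical helper name, nothing assumed beyond the standard headers); it also shows why the width change matters, since with a 32-bit long on LLP64 Windows the shift would run on a truncated value:

#include <cassert>
#include <cstdint>

// Does val fit in the signed bit-field insn[msb:lsb]?
static bool fits_signed_field(int64_t val, int msb, int lsb) {
  int nbits = msb - lsb + 1;
  int64_t chk = val >> (nbits - 1);  // arithmetic shift keeps the sign bits
  return chk == -1 || chk == 0;      // all ones (negative) or all zeros
}

int main() {
  assert(fits_signed_field(-1, 23, 5));        // 19-bit field holds -1
  assert(!fits_signed_field(1 << 20, 23, 5));  // 2^20 does not fit 19 bits
  return 0;
}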
- static const unsigned long branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M); + static const uint64_t branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M); static bool reachable_from_branch_at(address branch, address target) { return uabs(target - branch) < branch_range; @@ -856,7 +856,7 @@ #define INSN(NAME, opcode) \ void NAME(address dest) { \ starti; \ - long offset = (dest - pc()) >> 2; \ + int64_t offset = (dest - pc()) >> 2; \ DEBUG_ONLY(assert(reachable_from_branch_at(pc(), dest), "debug only")); \ f(opcode, 31), f(0b00101, 30, 26), sf(offset, 25, 0); \ } \ @@ -873,7 +873,7 @@ // Compare & branch (immediate) #define INSN(NAME, opcode) \ void NAME(Register Rt, address dest) { \ - long offset = (dest - pc()) >> 2; \ + int64_t offset = (dest - pc()) >> 2; \ starti; \ f(opcode, 31, 24), sf(offset, 23, 5), rf(Rt, 0); \ } \ @@ -891,7 +891,7 @@ // Test & branch (immediate) #define INSN(NAME, opcode) \ void NAME(Register Rt, int bitpos, address dest) { \ - long offset = (dest - pc()) >> 2; \ + int64_t offset = (dest - pc()) >> 2; \ int b5 = bitpos >> 5; \ bitpos &= 0x1f; \ starti; \ @@ -912,7 +912,7 @@ {EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV}; void br(Condition cond, address dest) { - long offset = (dest - pc()) >> 2; + int64_t offset = (dest - pc()) >> 2; starti; f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0); } @@ -1292,7 +1292,7 @@ // Load register (literal) #define INSN(NAME, opc, V) \ void NAME(Register Rt, address dest) { \ - long offset = (dest - pc()) >> 2; \ + int64_t offset = (dest - pc()) >> 2; \ starti; \ f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \ sf(offset, 23, 5); \ @@ -1317,7 +1317,7 @@ #define INSN(NAME, opc, V) \ void NAME(FloatRegister Rt, address dest) { \ - long offset = (dest - pc()) >> 2; \ + int64_t offset = (dest - pc()) >> 2; \ starti; \ f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \ sf(offset, 23, 5); \ @@ -1332,7 +1332,7 @@ #define INSN(NAME, opc, V) \ void NAME(address dest, prfop op = PLDL1KEEP) { \ - long offset = (dest - pc()) >> 2; \ + int64_t offset = (dest - pc()) >> 2; \ starti; \ f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \ sf(offset, 23, 5); \ @@ -1408,7 +1408,7 @@ assert(size == 0b10 || size == 0b11, "bad operand size in ldr"); assert(op == 0b01, "literal form can only be used with loads"); f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24); - long offset = (adr.target() - pc()) >> 2; + int64_t offset = (adr.target() - pc()) >> 2; sf(offset, 23, 5); code_section()->relocate(pc(), adr.rspec()); return; @@ -2683,7 +2683,7 @@ virtual void bang_stack_with_offset(int offset); static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm); - static bool operand_valid_for_add_sub_immediate(long imm); + static bool operand_valid_for_add_sub_immediate(int64_t imm); static bool operand_valid_for_float_immediate(double imm); void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0); diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp @@ -1352,7 +1352,7 @@ __ load_klass(klass_RInfo, obj); if (k->is_loaded()) { // See if we get an immediate positive hit - __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset()))); + __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset()))); __ cmp(k_RInfo, rscratch1); if 
((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) { __ br(Assembler::NE, *failure_target); @@ -2016,7 +2016,7 @@ } else if (code == lir_cmp_l2i) { Label done; __ cmp(left->as_register_lo(), right->as_register_lo()); - __ mov(dst->as_register(), (u_int64_t)-1L); + __ mov(dst->as_register(), (uint64_t)-1L); __ br(Assembler::LT, done); __ csinc(dst->as_register(), zr, zr, Assembler::EQ); __ bind(done); @@ -2675,7 +2675,7 @@ Register res = op->result_opr()->as_register(); assert_different_registers(val, crc, res); - unsigned long offset; + uint64_t offset; __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset); if (offset) __ add(res, res, offset); diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.cpp b/src/hotspot/cpu/aarch64/frame_aarch64.cpp --- a/src/hotspot/cpu/aarch64/frame_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/frame_aarch64.cpp @@ -653,14 +653,14 @@ #define DESCRIBE_FP_OFFSET(name) \ { \ - unsigned long *p = (unsigned long *)fp; \ - printf("0x%016lx 0x%016lx %s\n", (unsigned long)(p + frame::name##_offset), \ + uint64_t *p = (uint64_t *)fp; \ + printf("0x%016lx 0x%016lx %s\n", (uint64_t)(p + frame::name##_offset), \ p[frame::name##_offset], #name); \ } -static __thread unsigned long nextfp; -static __thread unsigned long nextpc; -static __thread unsigned long nextsp; +static __thread uint64_t nextfp; +static __thread uint64_t nextpc; +static __thread uint64_t nextsp; static __thread RegisterMap *reg_map; static void printbc(Method *m, intptr_t bcx) { @@ -679,7 +679,7 @@ printf("%s : %s ==> %s\n", m->name_and_sig_as_C_string(), buf, name); } -void internal_pf(unsigned long sp, unsigned long fp, unsigned long pc, unsigned long bcx) { +void internal_pf(uint64_t sp, uint64_t fp, uint64_t pc, uint64_t bcx) { if (! fp) return; @@ -693,7 +693,7 @@ DESCRIBE_FP_OFFSET(interpreter_frame_locals); DESCRIBE_FP_OFFSET(interpreter_frame_bcp); DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp); - unsigned long *p = (unsigned long *)fp; + uint64_t *p = (uint64_t *)fp; // We want to see all frames, native and Java. 
For compiled and // interpreted frames we have special information that allows us to @@ -703,16 +703,16 @@ if (this_frame.is_compiled_frame() || this_frame.is_interpreted_frame()) { frame sender = this_frame.sender(reg_map); - nextfp = (unsigned long)sender.fp(); - nextpc = (unsigned long)sender.pc(); - nextsp = (unsigned long)sender.unextended_sp(); + nextfp = (uint64_t)sender.fp(); + nextpc = (uint64_t)sender.pc(); + nextsp = (uint64_t)sender.unextended_sp(); } else { nextfp = p[frame::link_offset]; nextpc = p[frame::return_addr_offset]; - nextsp = (unsigned long)&p[frame::sender_sp_offset]; + nextsp = (uint64_t)&p[frame::sender_sp_offset]; } - if (bcx == -1ul) + if (bcx == -1ull) bcx = p[frame::interpreter_frame_bcp_offset]; if (Interpreter::contains((address)pc)) { @@ -746,8 +746,8 @@ internal_pf (nextsp, nextfp, nextpc, -1); } -extern "C" void pf(unsigned long sp, unsigned long fp, unsigned long pc, - unsigned long bcx, unsigned long thread) { +extern "C" void pf(uint64_t sp, uint64_t fp, uint64_t pc, + uint64_t bcx, uint64_t thread) { if (!reg_map) { reg_map = NEW_C_HEAP_OBJ(RegisterMap, mtNone); ::new (reg_map) RegisterMap((JavaThread*)thread, false); @@ -766,9 +766,9 @@ // support for printing out where we are in a Java method // needs to be passed current fp and bcp register values // prints method name, bc index and bytecode name -extern "C" void pm(unsigned long fp, unsigned long bcx) { +extern "C" void pm(uint64_t fp, uint64_t bcx) { DESCRIBE_FP_OFFSET(interpreter_frame_method); - unsigned long *p = (unsigned long *)fp; + uint64_t *p = (uint64_t *)fp; Method* m = (Method*)p[frame::interpreter_frame_method_offset]; printbc(m, bcx); } diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp @@ -178,7 +178,7 @@ Label retry; __ bind(retry); { - unsigned long offset; + uint64_t offset; __ adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset); __ ldr(heap_end, Address(rscratch1, offset)); } @@ -187,7 +187,7 @@ // Get the current top of the heap { - unsigned long offset; + uint64_t offset; __ adrp(rscratch1, heap_top, offset); // Use add() here after ADRP, rather than lea(). // lea() does not generate anything if its offset is zero. diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp @@ -206,7 +206,7 @@ BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath); // The Address offset is too large to direct load - -784. Our range is +127, -128.
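Each adrp() call in the barrierSetAssembler hunks above follows the same convention: the instruction materializes the 4 KiB page of the target, and the low 12 bits come back through the uint64_t out-parameter for the following ldr or add to consume. A rough sketch of that split under those assumptions (hypothetical helper, not HotSpot code):

#include <cstdint>
#include <cstdio>

// Split a virtual address the way the adrp()/ldr pairs above consume it.
static uint64_t split_for_adrp(uint64_t target, uint64_t& byte_offset) {
  byte_offset = target & UINT64_C(0xfff);  // low 12 bits: the out-parameter
  return target & ~UINT64_C(0xfff);        // 4 KiB page base: what ADRP forms
}

int main() {
  uint64_t off;
  uint64_t page = split_for_adrp(UINT64_C(0x7fffee09ac88), off);
  printf("page 0x%llx + offset 0x%llx\n",
         (unsigned long long)page, (unsigned long long)off);
  return 0;
}

Were the offset still an unsigned long, an LLP64 build would compute page bases from a 32-bit value and drop the upper half of the address, which is why every adrp() out-parameter in this patch widens to uint64_t.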
- __ mov(tmp, (long int)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) - + __ mov(tmp, (int64_t)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) - in_bytes(JavaThread::jni_environment_offset()))); // Load address bad mask diff --git a/src/hotspot/cpu/aarch64/immediate_aarch64.cpp b/src/hotspot/cpu/aarch64/immediate_aarch64.cpp --- a/src/hotspot/cpu/aarch64/immediate_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/immediate_aarch64.cpp @@ -22,7 +22,7 @@ * */ -#include +#include "utilities/globalDefinitions.hpp" #include "immediate_aarch64.hpp" // there are at most 2^13 possible logical immediate encodings @@ -34,14 +34,14 @@ // for forward lookup we just use a direct array lookup // and assume that the client has supplied a valid encoding // table[encoding] = immediate -static u_int64_t LITable[LI_TABLE_SIZE]; +static uint64_t LITable[LI_TABLE_SIZE]; // for reverse lookup we need a sparse map so we store a table of // immediate and encoding pairs sorted by immediate value struct li_pair { - u_int64_t immediate; - u_int32_t encoding; + uint64_t immediate; + uint32_t encoding; }; static struct li_pair InverseLITable[LI_TABLE_SIZE]; @@ -63,9 +63,9 @@ // helper functions used by expandLogicalImmediate // for i = 1, ... N result = 1 other bits are zero -static inline u_int64_t ones(int N) +static inline uint64_t ones(int N) { - return (N == 64 ? (u_int64_t)-1UL : ((1UL << N) - 1)); + return (N == 64 ? (uint64_t)-1ULL : ((1ULL << N) - 1)); } /* */ // 32 bit mask with bits [hi,...,lo] set -static inline u_int32_t mask32(int hi = 31, int lo = 0) +static inline uint32_t mask32(int hi = 31, int lo = 0) { int nbits = (hi + 1) - lo; return ((1 << nbits) - 1) << lo; } -static inline u_int64_t mask64(int hi = 63, int lo = 0) +static inline uint64_t mask64(int hi = 63, int lo = 0) { int nbits = (hi + 1) - lo; return ((1L << nbits) - 1) << lo; } // pick bits [hi,...,lo] from val -static inline u_int32_t pick32(u_int32_t val, int hi = 31, int lo = 0) +static inline uint32_t pick32(uint32_t val, int hi = 31, int lo = 0) { return (val & mask32(hi, lo)); } // pick bits [hi,...,lo] from val -static inline u_int64_t pick64(u_int64_t val, int hi = 31, int lo = 0) +static inline uint64_t pick64(uint64_t val, int hi = 31, int lo = 0) { return (val & mask64(hi, lo)); } // mask [hi,lo] and shift down to start at bit 0 -static inline u_int32_t pickbits32(u_int32_t val, int hi = 31, int lo = 0) +static inline uint32_t pickbits32(uint32_t val, int hi = 31, int lo = 0) { return (pick32(val, hi, lo) >> lo); } // mask [hi,lo] and shift down to start at bit 0 -static inline u_int64_t pickbits64(u_int64_t val, int hi = 63, int lo = 0) +static inline uint64_t pickbits64(uint64_t val, int hi = 63, int lo = 0) { return (pick64(val, hi, lo) >> lo); } // result<0> to val -static inline u_int64_t pickbit(u_int64_t val, int N) +static inline uint64_t pickbit(uint64_t val, int N) { return pickbits64(val, N, N); } -static inline u_int32_t uimm(u_int32_t val, int hi, int lo) +static inline uint32_t uimm(uint32_t val, int hi, int lo) { return pickbits32(val, hi, lo); } @@ -123,11 +123,11 @@ // SPEC bits(M*N) Replicate(bits(M) x, integer N); // this is just an educated guess -u_int64_t replicate(u_int64_t bits, int nbits, int count) +uint64_t replicate(uint64_t bits, int nbits, int count) { - u_int64_t result = 0; + uint64_t result = 0; // nbits may be 64 in which case we want mask to be -1 - u_int64_t mask = ones(nbits); + uint64_t mask = ones(nbits); for (int i = 0; i < count ; i++) { result <<= nbits;
result |= (bits & mask); @@ -140,24 +140,24 @@ // encoding must be treated as an UNALLOC instruction // construct a 32 bit immediate value for a logical immediate operation -int expandLogicalImmediate(u_int32_t immN, u_int32_t immr, - u_int32_t imms, u_int64_t &bimm) +int expandLogicalImmediate(uint32_t immN, uint32_t immr, + uint32_t imms, uint64_t &bimm) { int len; // ought to be <= 6 - u_int32_t levels; // 6 bits - u_int32_t tmask_and; // 6 bits - u_int32_t wmask_and; // 6 bits - u_int32_t tmask_or; // 6 bits - u_int32_t wmask_or; // 6 bits - u_int64_t imm64; // 64 bits - u_int64_t tmask, wmask; // 64 bits - u_int32_t S, R, diff; // 6 bits? + uint32_t levels; // 6 bits + uint32_t tmask_and; // 6 bits + uint32_t wmask_and; // 6 bits + uint32_t tmask_or; // 6 bits + uint32_t wmask_or; // 6 bits + uint64_t imm64; // 64 bits + uint64_t tmask, wmask; // 64 bits + uint32_t S, R, diff; // 6 bits? if (immN == 1) { len = 6; // looks like 7 given the spec above but this cannot be! } else { len = 0; - u_int32_t val = (~imms & 0x3f); + uint32_t val = (~imms & 0x3f); for (int i = 5; i > 0; i--) { if (val & (1 << i)) { len = i; @@ -170,7 +170,7 @@ // for valid inputs leading 1s in immr must be less than leading // zeros in imms int len2 = 0; // ought to be < len - u_int32_t val2 = (~immr & 0x3f); + uint32_t val2 = (~immr & 0x3f); for (int i = 5; i > 0; i--) { if (!(val2 & (1 << i))) { len2 = i; @@ -199,12 +199,12 @@ for (int i = 0; i < 6; i++) { int nbits = 1 << i; - u_int64_t and_bit = pickbit(tmask_and, i); - u_int64_t or_bit = pickbit(tmask_or, i); - u_int64_t and_bits_sub = replicate(and_bit, 1, nbits); - u_int64_t or_bits_sub = replicate(or_bit, 1, nbits); - u_int64_t and_bits_top = (and_bits_sub << nbits) | ones(nbits); - u_int64_t or_bits_top = (0 << nbits) | or_bits_sub; + uint64_t and_bit = pickbit(tmask_and, i); + uint64_t or_bit = pickbit(tmask_or, i); + uint64_t and_bits_sub = replicate(and_bit, 1, nbits); + uint64_t or_bits_sub = replicate(or_bit, 1, nbits); + uint64_t and_bits_top = (and_bits_sub << nbits) | ones(nbits); + uint64_t or_bits_top = (0 << nbits) | or_bits_sub; tmask = ((tmask & (replicate(and_bits_top, 2 * nbits, 32 / nbits))) @@ -218,12 +218,12 @@ for (int i = 0; i < 6; i++) { int nbits = 1 << i; - u_int64_t and_bit = pickbit(wmask_and, i); - u_int64_t or_bit = pickbit(wmask_or, i); - u_int64_t and_bits_sub = replicate(and_bit, 1, nbits); - u_int64_t or_bits_sub = replicate(or_bit, 1, nbits); - u_int64_t and_bits_top = (ones(nbits) << nbits) | and_bits_sub; - u_int64_t or_bits_top = (or_bits_sub << nbits) | 0; + uint64_t and_bit = pickbit(wmask_and, i); + uint64_t or_bit = pickbit(wmask_or, i); + uint64_t and_bits_sub = replicate(and_bit, 1, nbits); + uint64_t or_bits_sub = replicate(or_bit, 1, nbits); + uint64_t and_bits_top = (ones(nbits) << nbits) | and_bits_sub; + uint64_t or_bits_top = (or_bits_sub << nbits) | 0; wmask = ((wmask & (replicate(and_bits_top, 2 * nbits, 32 / nbits))) @@ -248,9 +248,9 @@ { li_table_entry_count = 0; for (unsigned index = 0; index < LI_TABLE_SIZE; index++) { - u_int32_t N = uimm(index, 12, 12); - u_int32_t immr = uimm(index, 11, 6); - u_int32_t imms = uimm(index, 5, 0); + uint32_t N = uimm(index, 12, 12); + uint32_t immr = uimm(index, 11, 6); + uint32_t imms = uimm(index, 5, 0); if (expandLogicalImmediate(N, immr, imms, LITable[index])) { InverseLITable[li_table_entry_count].immediate = LITable[index]; InverseLITable[li_table_entry_count].encoding = index; @@ -264,12 +264,12 @@ // public APIs provided for logical immediate lookup and reverse 
lookup -u_int64_t logical_immediate_for_encoding(u_int32_t encoding) +uint64_t logical_immediate_for_encoding(uint32_t encoding) { return LITable[encoding]; } -u_int32_t encoding_for_logical_immediate(u_int64_t immediate) +uint32_t encoding_for_logical_immediate(uint64_t immediate) { struct li_pair pair; struct li_pair *result; @@ -293,15 +293,15 @@ // fpimm[3:0] = fraction (assuming leading 1) // i.e. F = s * 1.f * 2^(e - b) -u_int64_t fp_immediate_for_encoding(u_int32_t imm8, int is_dp) +uint64_t fp_immediate_for_encoding(uint32_t imm8, int is_dp) { union { float fpval; double dpval; - u_int64_t val; + uint64_t val; }; - u_int32_t s, e, f; + uint32_t s, e, f; s = (imm8 >> 7 ) & 0x1; e = (imm8 >> 4) & 0x7; f = imm8 & 0xf; @@ -329,7 +329,7 @@ return val; } -u_int32_t encoding_for_fp_immediate(float immediate) +uint32_t encoding_for_fp_immediate(float immediate) { // given a float which is of the form // @@ -341,10 +341,10 @@ union { float fpval; - u_int32_t val; + uint32_t val; }; fpval = immediate; - u_int32_t s, r, f, res; + uint32_t s, r, f, res; // sign bit is 31 s = (val >> 31) & 0x1; // exponent is bits 30-23 but we only want the bottom 3 bits diff --git a/src/hotspot/cpu/aarch64/immediate_aarch64.hpp b/src/hotspot/cpu/aarch64/immediate_aarch64.hpp --- a/src/hotspot/cpu/aarch64/immediate_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/immediate_aarch64.hpp @@ -46,9 +46,9 @@ * encoding then a map lookup will return 0xffffffff. */ -u_int64_t logical_immediate_for_encoding(u_int32_t encoding); -u_int32_t encoding_for_logical_immediate(u_int64_t immediate); -u_int64_t fp_immediate_for_encoding(u_int32_t imm8, int is_dp); -u_int32_t encoding_for_fp_immediate(float immediate); +uint64_t logical_immediate_for_encoding(uint32_t encoding); +uint32_t encoding_for_logical_immediate(uint64_t immediate); +uint64_t fp_immediate_for_encoding(uint32_t imm8, int is_dp); +uint32_t encoding_for_fp_immediate(float immediate); #endif // _IMMEDIATE_H diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp --- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp @@ -168,7 +168,7 @@ } void InterpreterMacroAssembler::get_dispatch() { - unsigned long offset; + uint64_t offset; adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset); lea(rdispatch, Address(rdispatch, offset)); } @@ -765,7 +765,7 @@ // copy mov(rscratch1, sp); sub(swap_reg, swap_reg, rscratch1); - ands(swap_reg, swap_reg, (unsigned long)(7 - os::vm_page_size())); + ands(swap_reg, swap_reg, (uint64_t)(7 - os::vm_page_size())); // Save the test result, for recursive case, the result is zero str(swap_reg, Address(lock_reg, mark_offset)); diff --git a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp --- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp @@ -346,7 +346,7 @@ if (_num_fp_args < Argument::n_float_register_parameters_c) { *_fp_args++ = from_obj; - *_fp_identifiers |= (1 << _num_fp_args); // mark as double + *_fp_identifiers |= (1ULL << _num_fp_args); // mark as double _num_fp_args++; } else { *_to++ = from_obj; diff --git a/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp b/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp --- a/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp @@ -73,7 +73,7 @@ Label slow; - unsigned long offset; + uint64_t 
offset; __ adrp(rcounter_addr, SafepointSynchronize::safepoint_counter_addr(), offset); Address safepoint_counter_addr(rcounter_addr, offset); @@ -88,7 +88,7 @@ // Check to see if a field access watch has been set before we // take the fast path. - unsigned long offset2; + uint64_t offset2; __ adrp(result, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()), offset2); diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -70,8 +70,8 @@ // Return the total length (in bytes) of the instructions. int MacroAssembler::pd_patch_instruction_size(address branch, address target) { int instructions = 1; - assert((uint64_t)target < (1ul << 48), "48-bit overflow in address constant"); - long offset = (target - branch) >> 2; + assert((uint64_t)target < ((uint64_t)1 << 48), "48-bit overflow in address constant"); + int64_t offset = (target - branch) >> 2; unsigned insn = *(unsigned*)branch; if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) { // Load register (literal) @@ -93,7 +93,7 @@ offset = target-branch; int shift = Instruction_aarch64::extract(insn, 31, 31); if (shift) { - u_int64_t dest = (u_int64_t)target; + uint64_t dest = (uint64_t)target; uint64_t pc_page = (uint64_t)branch >> 12; uint64_t adr_page = (uint64_t)target >> 12; unsigned offset_lo = dest & 0xfff; @@ -134,9 +134,9 @@ Instruction_aarch64::extract(insn2, 4, 0)) { // movk #imm16<<32 Instruction_aarch64::patch(branch + 4, 20, 5, (uint64_t)target >> 32); - long dest = ((long)target & 0xffffffffL) | ((long)branch & 0xffff00000000L); - long pc_page = (long)branch >> 12; - long adr_page = (long)dest >> 12; + int64_t dest = ((int64_t)target & 0xffffffffL) | ((int64_t)branch & 0xffff00000000L); + int64_t pc_page = (int64_t)branch >> 12; + int64_t adr_page = (int64_t)dest >> 12; offset = adr_page - pc_page; instructions = 2; } @@ -146,7 +146,7 @@ Instruction_aarch64::spatch(branch, 23, 5, offset); Instruction_aarch64::patch(branch, 30, 29, offset_lo); } else if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010100) { - u_int64_t dest = (u_int64_t)target; + uint64_t dest = (uint64_t)target; // Move wide constant assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch"); assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch"); @@ -205,7 +205,7 @@ } address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) { - long offset = 0; + int64_t offset = 0; if ((Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000) { // Load register (literal) offset = Instruction_aarch64::sextract(insn, 23, 5); @@ -272,13 +272,13 @@ ShouldNotReachHere(); } } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) { - u_int32_t *insns = (u_int32_t *)insn_addr; + uint32_t *insns = (uint32_t *)insn_addr; // Move wide constant: movz, movk, movk. See movptr(). 
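The reassembly spelled out in the next few lines recovers a 48-bit address from the three 16-bit immediate fields of a movz/movk/movk sequence. A hedged sketch of the same arithmetic (standalone, with made-up field values):

#include <cstdint>
#include <cstdio>

// Rebuild a 48-bit target from the three 16-bit movz/movk/movk immediates,
// mirroring the extract-and-shift expression in target_addr_for_insn().
static uint64_t reassemble_movptr(uint32_t imm0, uint32_t imm1, uint32_t imm2) {
  return (uint64_t)imm0 | ((uint64_t)imm1 << 16) | ((uint64_t)imm2 << 32);
}

int main() {
  // 0x7fffee09ac88 split into halfwords: 0xac88, 0xee09, 0x7fff
  printf("0x%llx\n",
         (unsigned long long)reassemble_movptr(0xac88, 0xee09, 0x7fff));
  return 0;
}

The 48-bit limit is also why the assert near the top of pd_patch_instruction_size() changed: (1ul << 48) shifts a 32-bit unsigned long on LLP64 Windows, which is undefined behavior, whereas ((uint64_t)1 << 48) expresses the intended bound on both data models.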
assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch"); assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch"); - return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5)) - + (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16) - + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32)); + return address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5)) + + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16) + + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32)); } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 && Instruction_aarch64::extract(insn, 4, 0) == 0b11111) { return 0; @@ -389,7 +389,7 @@ assert(CodeCache::find_blob(entry.target()) != NULL, "destination of far call not found in code cache"); if (far_branches()) { - unsigned long offset; + uint64_t offset; // We can use ADRP here because we know that the total size of // the code cache cannot exceed 2Gb. adrp(tmp, entry, offset); @@ -407,7 +407,7 @@ assert(CodeCache::find_blob(entry.target()) != NULL, "destination of far call not found in code cache"); if (far_branches()) { - unsigned long offset; + uint64_t offset; // We can use ADRP here because we know that the total size of // the code cache cannot exceed 2Gb. adrp(tmp, entry, offset); @@ -824,7 +824,7 @@ address MacroAssembler::ic_call(address entry, jint method_index) { RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index); // address const_ptr = long_constant((jlong)Universe::non_oop_word()); - // unsigned long offset; + // uint64_t offset; // ldr_constant(rscratch2, const_ptr); movptr(rscratch2, (uintptr_t)Universe::non_oop_word()); return trampoline_call(Address(entry, rh)); @@ -1491,7 +1491,7 @@ void MacroAssembler::mov(Register r, Address dest) { code_section()->relocate(pc(), dest.rspec()); - u_int64_t imm64 = (u_int64_t)dest.target(); + uint64_t imm64 = (uint64_t)dest.target(); movptr(r, imm64); } @@ -1507,7 +1507,7 @@ block_comment(buffer); } #endif - assert(imm64 < (1ul << 48), "48-bit overflow in address constant"); + assert(imm64 < (1ull << 48), "48-bit overflow in address constant"); movz(r, imm64 & 0xffff); imm64 >>= 16; movk(r, imm64 & 0xffff, 16); @@ -1524,20 +1524,20 @@ // imm32 == hex abcdefgh T2S: Vd = abcdefghabcdefgh // imm32 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh // T1D/T2D: invalid -void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) { +void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint32_t imm32) { assert(T != T1D && T != T2D, "invalid arrangement"); if (T == T8B || T == T16B) { assert((imm32 & ~0xff) == 0, "extraneous bits in unsigned imm32 (T8B/T16B)"); movi(Vd, T, imm32 & 0xff, 0); return; } - u_int32_t nimm32 = ~imm32; + uint32_t nimm32 = ~imm32; if (T == T4H || T == T8H) { assert((imm32 & ~0xffff) == 0, "extraneous bits in unsigned imm32 (T4H/T8H)"); imm32 &= 0xffff; nimm32 &= 0xffff; } - u_int32_t x = imm32; + uint32_t x = imm32; int movi_cnt = 0; int movn_cnt = 0; while (x) { if (x & 0xff) movi_cnt++; x >>= 8; } @@ -1561,7 +1561,7 @@ } } -void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64) +void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64) { #ifndef PRODUCT { @@ -1575,7 +1575,7 @@ } else { // we can use a combination of MOVZ or MOVN with // MOVK to build up the constant - u_int64_t imm_h[4]; + uint64_t imm_h[4]; int zero_count = 0; int neg_count = 0; int i; @@ -1596,7 +1596,7 @@ } else if (zero_count == 3) { 
for (i = 0; i < 4; i++) { if (imm_h[i] != 0L) { - movz(dst, (u_int32_t)imm_h[i], (i << 4)); + movz(dst, (uint32_t)imm_h[i], (i << 4)); break; } } @@ -1604,7 +1604,7 @@ // one MOVN will do for (int i = 0; i < 4; i++) { if (imm_h[i] != 0xffffL) { - movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4)); + movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); break; } } @@ -1612,69 +1612,69 @@ // one MOVZ and one MOVK will do for (i = 0; i < 3; i++) { if (imm_h[i] != 0L) { - movz(dst, (u_int32_t)imm_h[i], (i << 4)); + movz(dst, (uint32_t)imm_h[i], (i << 4)); i++; break; } } for (;i < 4; i++) { if (imm_h[i] != 0L) { - movk(dst, (u_int32_t)imm_h[i], (i << 4)); + movk(dst, (uint32_t)imm_h[i], (i << 4)); } } } else if (neg_count == 2) { // one MOVN and one MOVK will do for (i = 0; i < 4; i++) { if (imm_h[i] != 0xffffL) { - movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4)); + movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); i++; break; } } for (;i < 4; i++) { if (imm_h[i] != 0xffffL) { - movk(dst, (u_int32_t)imm_h[i], (i << 4)); + movk(dst, (uint32_t)imm_h[i], (i << 4)); } } } else if (zero_count == 1) { // one MOVZ and two MOVKs will do for (i = 0; i < 4; i++) { if (imm_h[i] != 0L) { - movz(dst, (u_int32_t)imm_h[i], (i << 4)); + movz(dst, (uint32_t)imm_h[i], (i << 4)); i++; break; } } for (;i < 4; i++) { if (imm_h[i] != 0x0L) { - movk(dst, (u_int32_t)imm_h[i], (i << 4)); + movk(dst, (uint32_t)imm_h[i], (i << 4)); } } } else if (neg_count == 1) { // one MOVN and two MOVKs will do for (i = 0; i < 4; i++) { if (imm_h[i] != 0xffffL) { - movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4)); + movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); i++; break; } } for (;i < 4; i++) { if (imm_h[i] != 0xffffL) { - movk(dst, (u_int32_t)imm_h[i], (i << 4)); + movk(dst, (uint32_t)imm_h[i], (i << 4)); } } } else { // use a MOVZ and 3 MOVKs (makes it easier to debug) - movz(dst, (u_int32_t)imm_h[0], 0); + movz(dst, (uint32_t)imm_h[0], 0); for (i = 1; i < 4; i++) { - movk(dst, (u_int32_t)imm_h[i], (i << 4)); + movk(dst, (uint32_t)imm_h[i], (i << 4)); } } } } -void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32) +void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32) { #ifndef PRODUCT { @@ -1688,7 +1688,7 @@ } else { // we can use MOVZ, MOVN or two calls to MOVK to build up the // constant - u_int32_t imm_h[2]; + uint32_t imm_h[2]; imm_h[0] = imm32 & 0xffff; imm_h[1] = ((imm32 >> 16) & 0xffff); if (imm_h[0] == 0) { @@ -1711,7 +1711,7 @@ // not actually be used: you must use the Address that is returned. // It is up to you to ensure that the shift provided matches the size // of your data. 
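The mov_immediate64() hunks just above all implement one strategy: split the 64-bit constant into four 16-bit halfwords, count how many are all-zero versus all-one, and choose a MOVZ-based or MOVN-based sequence accordingly, filling in the rest with MOVKs. A simplified sketch of only the classification step (an illustration, not the HotSpot routine):

#include <cstdint>
#include <cstdio>

// Count the all-zero and all-one 16-bit halves of imm64; mov_immediate64
// leans on MOVZ when zeros dominate and on MOVN when ones dominate.
static void classify_halfwords(uint64_t imm64, int& zero_count, int& neg_count) {
  zero_count = 0;
  neg_count = 0;
  for (int i = 0; i < 4; i++) {
    uint32_t h = (uint32_t)(imm64 >> (i * 16)) & 0xffff;
    if (h == 0)      zero_count++;
    if (h == 0xffff) neg_count++;
  }
}

int main() {
  int zeros, ones;
  classify_halfwords(UINT64_C(0xffffffff00001234), zeros, ones);
  printf("zero halves: %d, 0xffff halves: %d\n", zeros, ones);  // 1 and 2
  return 0;
}

The form_address() change that follows applies the same width discipline to address arithmetic: byte offsets become int64_t so the 12-bit-split fallback behaves identically on LP64 and LLP64.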
-Address MacroAssembler::form_address(Register Rd, Register base, long byte_offset, int shift) { +Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) { if (Address::offset_ok_for_immed(byte_offset, shift)) // It fits; no need for any heroics return Address(base, byte_offset); @@ -1726,8 +1726,8 @@ // See if we can do this with two 12-bit offsets { - unsigned long word_offset = byte_offset >> shift; - unsigned long masked_offset = word_offset & 0xfff000; + uint64_t word_offset = byte_offset >> shift; + uint64_t masked_offset = word_offset & 0xfff000; if (Address::offset_ok_for_immed(word_offset - masked_offset, 0) && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) { add(Rd, base, masked_offset << shift); @@ -1968,7 +1968,7 @@ if (value < (1 << 12)) { sub(reg, reg, value); return; } /* else */ { assert(reg != rscratch2, "invalid dst for register decrement"); - mov(rscratch2, (unsigned long)value); + mov(rscratch2, (uint64_t) value); sub(reg, reg, rscratch2); } } @@ -2692,19 +2692,19 @@ // Returns true if it is, else false. bool MacroAssembler::merge_alignment_check(Register base, size_t size, - long cur_offset, - long prev_offset) const { + int64_t cur_offset, + int64_t prev_offset) const { if (AvoidUnalignedAccesses) { if (base == sp) { // Checks whether low offset if aligned to pair of registers. - long pair_mask = size * 2 - 1; - long offset = prev_offset > cur_offset ? cur_offset : prev_offset; + int64_t pair_mask = size * 2 - 1; + int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset; return (offset & pair_mask) == 0; } else { // If base is not sp, we can't guarantee the access is aligned. return false; } } else { - long mask = size - 1; + int64_t mask = size - 1; // Load/store pair instruction only supports element size aligned offset. return (cur_offset & mask) == 0 && (prev_offset & mask) == 0; } @@ -2737,8 +2737,8 @@ return false; } - long max_offset = 63 * prev_size_in_bytes; - long min_offset = -64 * prev_size_in_bytes; + int64_t max_offset = 63 * prev_size_in_bytes; + int64_t min_offset = -64 * prev_size_in_bytes; assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged."); @@ -2747,8 +2747,8 @@ return false; } - long cur_offset = adr.offset(); - long prev_offset = prev_ldst->offset(); + int64_t cur_offset = adr.offset(); + int64_t prev_offset = prev_ldst->offset(); size_t diff = abs(cur_offset - prev_offset); if (diff != prev_size_in_bytes) { return false; @@ -2765,7 +2765,7 @@ return false; } - long low_offset = prev_offset > cur_offset ? cur_offset : prev_offset; + int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset; // Offset range must be in ldp/stp instruction's range. if (low_offset > max_offset || low_offset < min_offset) { return false; @@ -2790,7 +2790,7 @@ address prev = pc() - NativeInstruction::instruction_size; NativeLdSt* prev_ldst = NativeLdSt_at(prev); - long offset; + int64_t offset; if (adr.offset() < prev_ldst->offset()) { offset = adr.offset(); @@ -3037,12 +3037,12 @@ // First Loop. 
// - // final static long LONG_MASK = 0xffffffffL; + // final static int64_t LONG_MASK = 0xffffffffL; // int xstart = xlen - 1; // int ystart = ylen - 1; - // long carry = 0; + // int64_t carry = 0; // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { - // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; + // int64_t product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; // z[kdx] = (int)product; // carry = product >>> 32; // } @@ -3080,7 +3080,7 @@ // for (int i = xstart-1; i >= 0; i--) { // Second loop // carry = 0; // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop - // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + + // int64_t product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + // (z[k] & LONG_MASK) + carry; // z[k] = (int)product; // carry = product >>> 32; @@ -3336,7 +3336,7 @@ Register table0, Register table1, Register table2, Register table3, Register tmp, Register tmp2, Register tmp3) { Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; - unsigned long offset; + uint64_t offset; if (UseCRC32) { kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3); @@ -3638,7 +3638,7 @@ SkipIfEqual::SkipIfEqual( MacroAssembler* masm, const bool* flag_addr, bool value) { _masm = masm; - unsigned long offset; + uint64_t offset; _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset); _masm->ldrb(rscratch1, Address(rscratch1, offset)); _masm->cbzw(rscratch1, _label); @@ -3667,7 +3667,7 @@ } void MacroAssembler::cmpptr(Register src1, Address src2) { - unsigned long offset; + uint64_t offset; adrp(rscratch1, src2, offset); ldr(rscratch1, Address(rscratch1, offset)); cmp(src1, rscratch1); @@ -4329,13 +4329,13 @@ return inst_mark(); } -void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) { +void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) { relocInfo::relocType rtype = dest.rspec().reloc()->type(); - unsigned long low_page = (unsigned long)CodeCache::low_bound() >> 12; - unsigned long high_page = (unsigned long)(CodeCache::high_bound()-1) >> 12; - unsigned long dest_page = (unsigned long)dest.target() >> 12; - long offset_low = dest_page - low_page; - long offset_high = dest_page - high_page; + uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12; + uint64_t high_page = (uint64_t)(CodeCache::high_bound() - 1) >> 12; + uint64_t dest_page = (uint64_t)dest.target() >> 12; + int64_t offset_low = dest_page - low_page; + int64_t offset_high = dest_page - high_page; assert(is_valid_AArch64_address(dest.target()), "bad address"); assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address"); @@ -4347,14 +4347,14 @@ if (offset_high >= -(1<<20) && offset_low < (1<<20)) { _adrp(reg1, dest.target()); } else { - unsigned long target = (unsigned long)dest.target(); - unsigned long adrp_target - = (target & 0xffffffffUL) | ((unsigned long)pc() & 0xffff00000000UL); + uint64_t target = (uint64_t)dest.target(); + uint64_t adrp_target + = (target & 0xffffffffUL) | ((uint64_t)pc() & 0xffff00000000UL); _adrp(reg1, (address)adrp_target); movk(reg1, target >> 32, 32); } - byte_offset = (unsigned long)dest.target() & 0xfff; + byte_offset = (uint64_t)dest.target() & 0xfff; } void MacroAssembler::load_byte_map_base(Register reg) { @@ -4364,7 +4364,7 @@ if (is_valid_AArch64_address((address)byte_map_base)) { // Strictly speaking the byte_map_base isn't an address at all, // and it might
even be negative. - unsigned long offset; + uint64_t offset; adrp(reg, ExternalAddress((address)byte_map_base), offset); // We expect offset to be zero with most collectors. if (offset != 0) { @@ -4409,7 +4409,6 @@ } } - // This method checks if provided byte array contains byte with highest bit set. void MacroAssembler::has_negatives(Register ary1, Register len, Register result) { // Simple and most common case of aligned small array which is not at the @@ -4807,7 +4806,7 @@ // base: Address of a buffer to be zeroed, 8 bytes aligned. // cnt: Immediate count in HeapWords. #define SmallArraySize (18 * BytesPerLong) -void MacroAssembler::zero_words(Register base, u_int64_t cnt) +void MacroAssembler::zero_words(Register base, uint64_t cnt) { BLOCK_COMMENT("zero_words {"); int i = cnt & 1; // store any odd word to start diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp @@ -456,8 +456,8 @@ // first two private routines for loading 32 bit or 64 bit constants private: - void mov_immediate64(Register dst, u_int64_t imm64); - void mov_immediate32(Register dst, u_int32_t imm32); + void mov_immediate64(Register dst, uint64_t imm64); + void mov_immediate32(Register dst, uint32_t imm32); int push(unsigned int bitset, Register stack); int pop(unsigned int bitset, Register stack); @@ -486,27 +486,27 @@ inline void mov(Register dst, address addr) { - mov_immediate64(dst, (u_int64_t)addr); + mov_immediate64(dst, (uint64_t)addr); } - inline void mov(Register dst, u_int64_t imm64) + inline void mov(Register dst, uint64_t imm64) { mov_immediate64(dst, imm64); } - inline void movw(Register dst, u_int32_t imm32) + inline void movw(Register dst, uint32_t imm32) { mov_immediate32(dst, imm32); } - inline void mov(Register dst, long l) + inline void mov(Register dst, int64_t l) { - mov(dst, (u_int64_t)l); + mov(dst, (uint64_t)l); } inline void mov(Register dst, int i) { - mov(dst, (long)i); + mov(dst, (int64_t)i); } void mov(Register dst, RegisterOrConstant src) { @@ -518,7 +518,7 @@ void movptr(Register r, uintptr_t imm64); - void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32); + void mov(FloatRegister Vd, SIMD_Arrangement T, uint32_t imm32); void mov(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { orr(Vd, T, Vn, Vn); @@ -1170,7 +1170,7 @@ void sub(Register Rd, Register Rn, RegisterOrConstant decrement); void subw(Register Rd, Register Rn, RegisterOrConstant decrement); - void adrp(Register reg1, const Address &dest, unsigned long &byte_offset); + void adrp(Register reg1, const Address &dest, uint64_t &byte_offset); void tableswitch(Register index, jint lowbound, jint highbound, Label &jumptable, Label &jumptable_end, int stride = 1) { @@ -1187,7 +1187,7 @@ // actually be used: you must use the Address that is returned. It // is up to you to ensure that the shift provided matches the size // of your data. - Address form_address(Register Rd, Register base, long byte_offset, int shift); + Address form_address(Register Rd, Register base, int64_t byte_offset, int shift); // Return true iff an address is within the 48-bit AArch64 address // space. 
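The mov() overload changes in macroAssembler_aarch64.hpp above are the crux of the LP64-versus-LLP64 cleanup: the old inline mov(Register dst, long l) funneled values through a type that is 64 bits wide on Linux but 32 bits wide on Windows, so large constants could be silently truncated before ever reaching mov_immediate64(). A minimal sketch of the failure mode, assuming an LLP64 target (hypothetical function name):

#include <cstdint>
#include <cstdio>

// Stand-in for the 64-bit mov path in the patch.
static void mov64(int64_t v) { printf("mov gets %lld\n", (long long)v); }

int main() {
  int64_t big = INT64_C(1) << 40;
  long as_long = (long)big;  // 32 bits on LLP64: value typically becomes 0
  mov64(as_long);            // a Windows build would lose the constant here
  mov64(big);                // int64_t carries the full value everywhere
  return 0;
}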
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64_log.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64_log.cpp
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64_log.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64_log.cpp
@@ -260,9 +260,9 @@
                                  Register tmp4, Register tmp5) {
   Label DONE, CHECK_CORNER_CASES, SMALL_VALUE, MAIN,
       CHECKED_CORNER_CASES, RETURN_MINF_OR_NAN;
-  const long INF_OR_NAN_PREFIX = 0x7FF0;
-  const long MINF_OR_MNAN_PREFIX = 0xFFF0;
-  const long ONE_PREFIX = 0x3FF0;
+  const int64_t INF_OR_NAN_PREFIX = 0x7FF0;
+  const int64_t MINF_OR_MNAN_PREFIX = 0xFFF0;
+  const int64_t ONE_PREFIX = 0x3FF0;
   movz(tmp2, ONE_PREFIX, 48);
   movz(tmp4, 0x0010, 48);
   fmovd(rscratch1, v0);                        // rscratch1 = AS_LONG_BITS(X)
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64_trig.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64_trig.cpp
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64_trig.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64_trig.cpp
@@ -201,9 +201,9 @@
 // NOTE: fpu registers are actively reused. See comments in code about their usage
 void MacroAssembler::generate__ieee754_rem_pio2(address npio2_hw, address two_over_pi, address pio2) {
-  const long PIO2_1t = 0x3DD0B4611A626331UL;
-  const long PIO2_2  = 0x3DD0B4611A600000UL;
-  const long PIO2_2t = 0x3BA3198A2E037073UL;
+  const int64_t PIO2_1t = 0x3DD0B4611A626331UL;
+  const int64_t PIO2_2  = 0x3DD0B4611A600000UL;
+  const int64_t PIO2_2t = 0x3BA3198A2E037073UL;
   Label X_IS_NEGATIVE, X_IS_MEDIUM_OR_LARGE, X_IS_POSITIVE_LONG_PI, LARGE_ELSE,
       REDUCTION_DONE, X_IS_MEDIUM_BRANCH_DONE, X_IS_LARGE, NX_SET,
       X_IS_NEGATIVE_LONG_PI;
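Annotation (not part of the patch): the *_PREFIX constants in the log stub are the top 16 bits of IEEE-754 double bit patterns, which movz(reg, imm, 48) shifts into place: 0x7FF0 is an all-ones exponent with sign 0 (infinity/NaN) and 0x3FF0 is the pattern of 1.0. A quick standalone check:

#include <cstdint>
#include <cstring>
#include <cstdio>
#include <limits>

static uint64_t bits_of(double d) {
  uint64_t u;
  std::memcpy(&u, &d, sizeof u);   // a portable AS_LONG_BITS(X)
  return u;
}

int main() {
  const int64_t INF_OR_NAN_PREFIX = 0x7FF0;  // exponent all ones, sign 0
  const int64_t ONE_PREFIX        = 0x3FF0;  // 1.0 == 0x3FF0000000000000

  // movz(reg, imm, 48) materializes (uint64_t)imm << 48.
  printf("%d\n", bits_of(1.0) == ((uint64_t)ONE_PREFIX << 48));      // 1
  printf("%d\n", (bits_of(std::numeric_limits<double>::infinity()) >> 48)
                     == (uint64_t)INF_OR_NAN_PREFIX);                // 1
  return 0;
}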
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
@@ -304,7 +304,7 @@
   unsigned insn = *(unsigned*)pc;
   if (maybe_cpool_ref(pc)) {
     address addr = MacroAssembler::target_addr_for_insn(pc);
-    *(long*)addr = x;
+    *(int64_t*)addr = x;
   } else {
     MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
     ICache::invalidate_range(instruction_address(), instruction_size);
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
@@ -684,7 +684,7 @@
       return 0;
     }
   }
-  size_t size_in_bytes() { return 1 << size(); }
+  size_t size_in_bytes() { return 1ULL << size(); }
   bool is_not_pre_post_index() { return (is_ldst_ur() || is_ldst_unsigned_offset()); }
   bool is_load() {
     assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 ||
diff --git a/src/hotspot/cpu/aarch64/register_aarch64.hpp b/src/hotspot/cpu/aarch64/register_aarch64.hpp
--- a/src/hotspot/cpu/aarch64/register_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/register_aarch64.hpp
@@ -65,7 +65,7 @@

   // Return the bit which represents this register.  This is intended
   // to be ORed into a bitmask: for usage see class RegSet below.
-  unsigned long bit(bool should_set = true) const { return should_set ? 1 << encoding() : 0; }
+  uint64_t bit(bool should_set = true) const { return should_set ? 1 << encoding() : 0; }
 };

 // The integer registers of the aarch64 architecture
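Annotation (not part of the patch): the 1ULL change matters because the literal 1 is a 32-bit int on LP64 and LLP64 alike, so '1 << n' is evaluated in 32 bits no matter how wide the destination is; size() stays small here, but the explicit 64-bit shift avoids the trap in general (and, presumably, MSVC's warning about shifts implicitly widened to 64 bits). A minimal sketch:

#include <cstdint>
#include <cstdio>

int main() {
  int n = 35;                    // hypothetical shift amount >= 32
  // uint64_t bad = 1 << n;      // undefined: the shift is evaluated in 32-bit int
  uint64_t good = 1ULL << n;     // the whole shift happens in 64 bits
  printf("0x%llx\n", (unsigned long long)good);   // 0x800000000
  return 0;
}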
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -1700,7 +1700,7 @@
   Label dtrace_method_entry, dtrace_method_entry_done;
   {
-    unsigned long offset;
+    uint64_t offset;
     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
     __ ldrb(rscratch1, Address(rscratch1, offset));
     __ cbnzw(rscratch1, dtrace_method_entry);
@@ -1914,7 +1914,7 @@
   Label dtrace_method_exit, dtrace_method_exit_done;
   {
-    unsigned long offset;
+    uint64_t offset;
     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
     __ ldrb(rscratch1, Address(rscratch1, offset));
     __ cbnzw(rscratch1, dtrace_method_exit);
diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
@@ -3283,8 +3283,8 @@
     // Max number of bytes we can process before having to take the mod
     // 0x15B0 is 5552 in decimal, the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
-    unsigned long BASE = 0xfff1;
-    unsigned long NMAX = 0x15B0;
+    uint64_t BASE = 0xfff1;
+    uint64_t NMAX = 0x15B0;

     __ mov(base, BASE);
     __ mov(nmax, NMAX);
@@ -5381,12 +5381,12 @@
     // In C, approximately:

     // void
-    // montgomery_multiply(unsigned long Pa_base[], unsigned long Pb_base[],
-    //                     unsigned long Pn_base[], unsigned long Pm_base[],
-    //                     unsigned long inv, int len) {
-    //   unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
-    //   unsigned long *Pa, *Pb, *Pn, *Pm;
-    //   unsigned long Ra, Rb, Rn, Rm;
+    // montgomery_multiply(uint64_t Pa_base[], uint64_t Pb_base[],
+    //                     uint64_t Pn_base[], uint64_t Pm_base[],
+    //                     uint64_t inv, int len) {
+    //   uint64_t t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
+    //   uint64_t *Pa, *Pb, *Pn, *Pm;
+    //   uint64_t Ra, Rb, Rn, Rm;

     //   int i;
@@ -5594,11 +5594,11 @@
     // In C, approximately:

     // void
-    // montgomery_square(unsigned long Pa_base[], unsigned long Pn_base[],
-    //                   unsigned long Pm_base[], unsigned long inv, int len) {
-    //   unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
-    //   unsigned long *Pa, *Pb, *Pn, *Pm;
-    //   unsigned long Ra, Rb, Rn, Rm;
+    // montgomery_square(uint64_t Pa_base[], uint64_t Pn_base[],
+    //                   uint64_t Pm_base[], uint64_t inv, int len) {
+    //   uint64_t t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
+    //   uint64_t *Pa, *Pb, *Pn, *Pm;
+    //   uint64_t Ra, Rb, Rn, Rm;

     //   int i;
diff --git a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
@@ -994,7 +994,7 @@
     __ ldrw(val, Address(esp, 0));              // byte value
     __ ldrw(crc, Address(esp, wordSize));       // Initial CRC

-    unsigned long offset;
+    uint64_t offset;
     __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
     __ add(tbl, tbl, offset);
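Annotation (not part of the patch): the NMAX comment in the Adler-32 stub can be verified directly. 5552 is the longest run of 0xff bytes whose two running sums still fit in an unsigned 32-bit accumulator before the modulo by BASE is taken. A brute-force check:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t BASE = 0xfff1;  // 65521, the largest prime below 2^16
  uint64_t n = 1;
  // Worst case: n bytes of 0xff added to sums that start just under BASE.
  while (255 * n * (n + 1) / 2 + (n + 1) * (BASE - 1) <= 0xffffffffULL) {
    n++;
  }
  printf("NMAX = %llu\n", (unsigned long long)(n - 1));  // prints 5552 == 0x15B0
  return 0;
}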
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -1706,7 +1706,7 @@
   Label done;
   __ pop_l(r1);
   __ cmp(r1, r0);
-  __ mov(r0, (u_int64_t)-1L);
+  __ mov(r0, (uint64_t)-1L);
   __ br(Assembler::LT, done);
   // __ mov(r0, 1UL);
   // __ csel(r0, r0, zr, Assembler::NE);
@@ -1730,7 +1730,7 @@
   if (unordered_result < 0) {
     // we want -1 for unordered or less than, 0 for equal and 1 for
     // greater than.
-    __ mov(r0, (u_int64_t)-1L);
+    __ mov(r0, (uint64_t)-1L);
     // for FP LT tests less than or unordered
     __ br(Assembler::LT, done);
     // install 0 for EQ otherwise 1
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
@@ -161,7 +161,7 @@
     SoftwarePrefetchHintDistance &= ~7;
   }

-  unsigned long auxv = getauxval(AT_HWCAP);
+  uint64_t auxv = getauxval(AT_HWCAP);

   char buf[512];

@@ -173,7 +173,7 @@
     char buf[1024], *p;
     while (fgets(buf, sizeof (buf), f) != NULL) {
       if ((p = strchr(buf, ':')) != NULL) {
-        long v = strtol(p+1, NULL, 0);
+        int64_t v = strtol(p+1, NULL, 0);
         if (strncmp(buf, "CPU implementer", sizeof "CPU implementer" - 1) == 0) {
           _cpu = v;
           cpu_lines++;
diff --git a/src/hotspot/share/utilities/globalDefinitions_gcc.hpp b/src/hotspot/share/utilities/globalDefinitions_gcc.hpp
--- a/src/hotspot/share/utilities/globalDefinitions_gcc.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions_gcc.hpp
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
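Annotation (not part of the patch): one subtlety in the vm_version_aarch64.cpp hunk above is that widening v to int64_t does not widen the parse itself, since strtol still returns 'long' (32 bits on LLP64 Windows). That is harmless here, because the /proc/cpuinfo fields are small and that code path is Linux-only, but code that must portably parse full 64-bit values would reach for strtoll. A minimal sketch:

#include <cstdint>
#include <cstdlib>
#include <cstdio>

int main() {
  const char* s = "0x41";                 // e.g. the "CPU implementer" field
  int64_t v  = strtol(s, NULL, 0);        // parse still happens in 'long'
  int64_t v2 = strtoll(s, NULL, 0);       // portable for full 64-bit ranges
  printf("%lld %lld\n", (long long)v, (long long)v2);   // 65 65
  return 0;
}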