1 /* 2 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, 2020 Red Hat Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 */ 24 25 #include <stdio.h> 26 #include <sys/types.h> 27 28 #include "precompiled.hpp" 29 #include "asm/assembler.hpp" 30 #include "asm/assembler.inline.hpp" 31 #include "interpreter/interpreter.hpp" 32 33 #ifndef PRODUCT 34 const uintptr_t Assembler::asm_bp = 0x00007fffee09ac88; 35 #endif 36 37 #include "compiler/disassembler.hpp" 38 #include "memory/resourceArea.hpp" 39 #include "runtime/interfaceSupport.inline.hpp" 40 #include "runtime/sharedRuntime.hpp" 41 #include "immediate_aarch64.hpp" 42 43 extern "C" void entry(CodeBuffer *cb); 44 45 #define __ _masm. 
46 #ifdef PRODUCT 47 #define BLOCK_COMMENT(str) /* nothing */ 48 #else 49 #define BLOCK_COMMENT(str) block_comment(str) 50 #endif 51 52 #define BIND(label) bind(label); __ BLOCK_COMMENT(#label ":") 53 54 static float unpack(unsigned value); 55 56 short Assembler::SIMD_Size_in_bytes[] = { 57 // T8B, T16B, T4H, T8H, T2S, T4S, T1D, T2D, T1Q 58 8, 16, 8, 16, 8, 16, 8, 16, 16 59 }; 60 61 #ifdef ASSERT 62 static void asm_check(const unsigned int *insns, const unsigned int *insns1, size_t len) { 63 bool ok = true; 64 for (unsigned int i = 0; i < len; i++) { 65 if (insns[i] != insns1[i]) { 66 ok = false; 67 printf("Ours:\n"); 68 Disassembler::decode((address)&insns1[i], (address)&insns1[i+1]); 69 printf("Theirs:\n"); 70 Disassembler::decode((address)&insns[i], (address)&insns[i+1]); 71 printf("\n"); 72 } 73 } 74 assert(ok, "Assembler smoke test failed"); 75 } 76 77 void entry(CodeBuffer *cb) { 78 79 // { 80 // for (int i = 0; i < 256; i+=16) 81 // { 82 // printf("\"%20.20g\", ", unpack(i)); 83 // printf("\"%20.20g\", ", unpack(i+1)); 84 // } 85 // printf("\n"); 86 // } 87 88 Assembler _masm(cb); 89 address entry = __ pc(); 90 91 // Smoke test for assembler 92 93 // BEGIN Generated code -- do not edit 94 // Generated by aarch64-asmtest.py 95 Label back, forth; 96 __ bind(back); 97 98 // ArithOp 99 __ add(r26, r23, r13, Assembler::LSL, 32); // add x26, x23, x13, LSL #32 100 __ sub(r12, r24, r9, Assembler::LSR, 37); // sub x12, x24, x9, LSR #37 101 __ adds(r28, r15, r8, Assembler::ASR, 39); // adds x28, x15, x8, ASR #39 102 __ subs(r7, r28, r30, Assembler::ASR, 57); // subs x7, x28, x30, ASR #57 103 __ addw(r9, r22, r27, Assembler::ASR, 15); // add w9, w22, w27, ASR #15 104 __ subw(r3, r13, r18, Assembler::ASR, 30); // sub w3, w13, w18, ASR #30 105 __ addsw(r14, r26, r8, Assembler::ASR, 17); // adds w14, w26, w8, ASR #17 106 __ subsw(r0, r22, r12, Assembler::ASR, 21); // subs w0, w22, w12, ASR #21 107 __ andr(r0, r15, r26, Assembler::LSL, 20); // and x0, x15, x26, LSL #20 108 
__ orr(r26, r5, r17, Assembler::LSL, 61); // orr x26, x5, x17, LSL #61 109 __ eor(r24, r13, r2, Assembler::LSL, 32); // eor x24, x13, x2, LSL #32 110 __ ands(r28, r3, r17, Assembler::ASR, 35); // ands x28, x3, x17, ASR #35 111 __ andw(r25, r16, r29, Assembler::LSR, 18); // and w25, w16, w29, LSR #18 112 __ orrw(r13, r18, r11, Assembler::LSR, 9); // orr w13, w18, w11, LSR #9 113 __ eorw(r5, r5, r18, Assembler::LSR, 15); // eor w5, w5, w18, LSR #15 114 __ andsw(r2, r23, r27, Assembler::ASR, 26); // ands w2, w23, w27, ASR #26 115 __ bic(r27, r28, r16, Assembler::LSR, 45); // bic x27, x28, x16, LSR #45 116 __ orn(r8, r25, r26, Assembler::ASR, 37); // orn x8, x25, x26, ASR #37 117 __ eon(r29, r17, r13, Assembler::LSR, 63); // eon x29, x17, x13, LSR #63 118 __ bics(r28, r24, r2, Assembler::LSR, 31); // bics x28, x24, x2, LSR #31 119 __ bicw(r19, r26, r7, Assembler::ASR, 3); // bic w19, w26, w7, ASR #3 120 __ ornw(r6, r24, r10, Assembler::ASR, 3); // orn w6, w24, w10, ASR #3 121 __ eonw(r4, r21, r1, Assembler::LSR, 29); // eon w4, w21, w1, LSR #29 122 __ bicsw(r16, r21, r0, Assembler::LSR, 19); // bics w16, w21, w0, LSR #19 123 124 // AddSubImmOp 125 __ addw(r17, r12, 379u); // add w17, w12, #379 126 __ addsw(r30, r1, 22u); // adds w30, w1, #22 127 __ subw(r29, r5, 126u); // sub w29, w5, #126 128 __ subsw(r6, r24, 960u); // subs w6, w24, #960 129 __ add(r0, r13, 104u); // add x0, x13, #104 130 __ adds(r8, r6, 663u); // adds x8, x6, #663 131 __ sub(r10, r5, 516u); // sub x10, x5, #516 132 __ subs(r1, r3, 1012u); // subs x1, x3, #1012 133 134 // LogicalImmOp 135 __ andw(r6, r11, 4294049777ull); // and w6, w11, #0xfff1fff1 136 __ orrw(r28, r5, 4294966791ull); // orr w28, w5, #0xfffffe07 137 __ eorw(r1, r20, 134217216ull); // eor w1, w20, #0x7fffe00 138 __ andsw(r7, r18, 1048576ull); // ands w7, w18, #0x100000 139 __ andr(r14, r12, 9223372036854775808ull); // and x14, x12, #0x8000000000000000 140 __ orr(r9, r11, 562675075514368ull); // orr x9, x11, #0x1ffc000000000 141 __ 
eor(r17, r0, 18014398509481728ull); // eor x17, x0, #0x3fffffffffff00 142 __ ands(r1, r8, 18446744073705357315ull); // ands x1, x8, #0xffffffffffc00003 143 144 // AbsOp 145 __ b(__ pc()); // b . 146 __ b(back); // b back 147 __ b(forth); // b forth 148 __ bl(__ pc()); // bl . 149 __ bl(back); // bl back 150 __ bl(forth); // bl forth 151 152 // RegAndAbsOp 153 __ cbzw(r10, __ pc()); // cbz w10, . 154 __ cbzw(r10, back); // cbz w10, back 155 __ cbzw(r10, forth); // cbz w10, forth 156 __ cbnzw(r8, __ pc()); // cbnz w8, . 157 __ cbnzw(r8, back); // cbnz w8, back 158 __ cbnzw(r8, forth); // cbnz w8, forth 159 __ cbz(r11, __ pc()); // cbz x11, . 160 __ cbz(r11, back); // cbz x11, back 161 __ cbz(r11, forth); // cbz x11, forth 162 __ cbnz(r29, __ pc()); // cbnz x29, . 163 __ cbnz(r29, back); // cbnz x29, back 164 __ cbnz(r29, forth); // cbnz x29, forth 165 __ adr(r19, __ pc()); // adr x19, . 166 __ adr(r19, back); // adr x19, back 167 __ adr(r19, forth); // adr x19, forth 168 __ _adrp(r19, __ pc()); // adrp x19, . 169 170 // RegImmAbsOp 171 __ tbz(r22, 6, __ pc()); // tbz x22, #6, . 172 __ tbz(r22, 6, back); // tbz x22, #6, back 173 __ tbz(r22, 6, forth); // tbz x22, #6, forth 174 __ tbnz(r12, 11, __ pc()); // tbnz x12, #11, . 
175 __ tbnz(r12, 11, back); // tbnz x12, #11, back 176 __ tbnz(r12, 11, forth); // tbnz x12, #11, forth 177 178 // MoveWideImmOp 179 __ movnw(r0, 6301, 0); // movn w0, #6301, lsl 0 180 __ movzw(r7, 20886, 0); // movz w7, #20886, lsl 0 181 __ movkw(r27, 18617, 0); // movk w27, #18617, lsl 0 182 __ movn(r12, 22998, 16); // movn x12, #22998, lsl 16 183 __ movz(r20, 1532, 16); // movz x20, #1532, lsl 16 184 __ movk(r8, 5167, 32); // movk x8, #5167, lsl 32 185 186 // BitfieldOp 187 __ sbfm(r15, r17, 24, 28); // sbfm x15, x17, #24, #28 188 __ bfmw(r15, r9, 14, 25); // bfm w15, w9, #14, #25 189 __ ubfmw(r27, r25, 6, 31); // ubfm w27, w25, #6, #31 190 __ sbfm(r19, r2, 23, 31); // sbfm x19, x2, #23, #31 191 __ bfm(r12, r21, 10, 6); // bfm x12, x21, #10, #6 192 __ ubfm(r22, r0, 26, 16); // ubfm x22, x0, #26, #16 193 194 // ExtractOp 195 __ extrw(r3, r3, r20, 27); // extr w3, w3, w20, #27 196 __ extr(r8, r30, r3, 54); // extr x8, x30, x3, #54 197 198 // CondBranchOp 199 __ br(Assembler::EQ, __ pc()); // b.EQ . 200 __ br(Assembler::EQ, back); // b.EQ back 201 __ br(Assembler::EQ, forth); // b.EQ forth 202 __ br(Assembler::NE, __ pc()); // b.NE . 203 __ br(Assembler::NE, back); // b.NE back 204 __ br(Assembler::NE, forth); // b.NE forth 205 __ br(Assembler::HS, __ pc()); // b.HS . 206 __ br(Assembler::HS, back); // b.HS back 207 __ br(Assembler::HS, forth); // b.HS forth 208 __ br(Assembler::CS, __ pc()); // b.CS . 209 __ br(Assembler::CS, back); // b.CS back 210 __ br(Assembler::CS, forth); // b.CS forth 211 __ br(Assembler::LO, __ pc()); // b.LO . 212 __ br(Assembler::LO, back); // b.LO back 213 __ br(Assembler::LO, forth); // b.LO forth 214 __ br(Assembler::CC, __ pc()); // b.CC . 215 __ br(Assembler::CC, back); // b.CC back 216 __ br(Assembler::CC, forth); // b.CC forth 217 __ br(Assembler::MI, __ pc()); // b.MI . 218 __ br(Assembler::MI, back); // b.MI back 219 __ br(Assembler::MI, forth); // b.MI forth 220 __ br(Assembler::PL, __ pc()); // b.PL . 
221 __ br(Assembler::PL, back); // b.PL back 222 __ br(Assembler::PL, forth); // b.PL forth 223 __ br(Assembler::VS, __ pc()); // b.VS . 224 __ br(Assembler::VS, back); // b.VS back 225 __ br(Assembler::VS, forth); // b.VS forth 226 __ br(Assembler::VC, __ pc()); // b.VC . 227 __ br(Assembler::VC, back); // b.VC back 228 __ br(Assembler::VC, forth); // b.VC forth 229 __ br(Assembler::HI, __ pc()); // b.HI . 230 __ br(Assembler::HI, back); // b.HI back 231 __ br(Assembler::HI, forth); // b.HI forth 232 __ br(Assembler::LS, __ pc()); // b.LS . 233 __ br(Assembler::LS, back); // b.LS back 234 __ br(Assembler::LS, forth); // b.LS forth 235 __ br(Assembler::GE, __ pc()); // b.GE . 236 __ br(Assembler::GE, back); // b.GE back 237 __ br(Assembler::GE, forth); // b.GE forth 238 __ br(Assembler::LT, __ pc()); // b.LT . 239 __ br(Assembler::LT, back); // b.LT back 240 __ br(Assembler::LT, forth); // b.LT forth 241 __ br(Assembler::GT, __ pc()); // b.GT . 242 __ br(Assembler::GT, back); // b.GT back 243 __ br(Assembler::GT, forth); // b.GT forth 244 __ br(Assembler::LE, __ pc()); // b.LE . 245 __ br(Assembler::LE, back); // b.LE back 246 __ br(Assembler::LE, forth); // b.LE forth 247 __ br(Assembler::AL, __ pc()); // b.AL . 248 __ br(Assembler::AL, back); // b.AL back 249 __ br(Assembler::AL, forth); // b.AL forth 250 __ br(Assembler::NV, __ pc()); // b.NV . 
251 __ br(Assembler::NV, back); // b.NV back 252 __ br(Assembler::NV, forth); // b.NV forth 253 254 // ImmOp 255 __ svc(12999); // svc #12999 256 __ hvc(2665); // hvc #2665 257 __ smc(9002); // smc #9002 258 __ brk(14843); // brk #14843 259 __ hlt(25964); // hlt #25964 260 261 // Op 262 __ nop(); // nop 263 __ eret(); // eret 264 __ drps(); // drps 265 __ isb(); // isb 266 267 // SystemOp 268 __ dsb(Assembler::ST); // dsb ST 269 __ dmb(Assembler::OSHST); // dmb OSHST 270 271 // OneRegOp 272 __ br(r16); // br x16 273 __ blr(r20); // blr x20 274 275 // LoadStoreExclusiveOp 276 __ stxr(r10, r27, r8); // stxr w10, x27, [x8] 277 __ stlxr(r0, r1, r21); // stlxr w0, x1, [x21] 278 __ ldxr(r17, r29); // ldxr x17, [x29] 279 __ ldaxr(r29, r28); // ldaxr x29, [x28] 280 __ stlr(r1, r23); // stlr x1, [x23] 281 __ ldar(r21, r20); // ldar x21, [x20] 282 283 // LoadStoreExclusiveOp 284 __ stxrw(r22, r27, r19); // stxr w22, w27, [x19] 285 __ stlxrw(r11, r16, r6); // stlxr w11, w16, [x6] 286 __ ldxrw(r18, r0); // ldxr w18, [x0] 287 __ ldaxrw(r4, r10); // ldaxr w4, [x10] 288 __ stlrw(r24, r22); // stlr w24, [x22] 289 __ ldarw(r10, r19); // ldar w10, [x19] 290 291 // LoadStoreExclusiveOp 292 __ stxrh(r1, r5, r30); // stxrh w1, w5, [x30] 293 __ stlxrh(r8, r12, r17); // stlxrh w8, w12, [x17] 294 __ ldxrh(r9, r14); // ldxrh w9, [x14] 295 __ ldaxrh(r7, r1); // ldaxrh w7, [x1] 296 __ stlrh(r5, r16); // stlrh w5, [x16] 297 __ ldarh(r2, r12); // ldarh w2, [x12] 298 299 // LoadStoreExclusiveOp 300 __ stxrb(r10, r12, r3); // stxrb w10, w12, [x3] 301 __ stlxrb(r28, r14, r26); // stlxrb w28, w14, [x26] 302 __ ldxrb(r30, r10); // ldxrb w30, [x10] 303 __ ldaxrb(r14, r21); // ldaxrb w14, [x21] 304 __ stlrb(r13, r9); // stlrb w13, [x9] 305 __ ldarb(r22, r27); // ldarb w22, [x27] 306 307 // LoadStoreExclusiveOp 308 __ ldxp(r28, r19, r11); // ldxp x28, x19, [x11] 309 __ ldaxp(r30, r19, r2); // ldaxp x30, x19, [x2] 310 __ stxp(r2, r23, r1, r0); // stxp w2, x23, x1, [x0] 311 __ stlxp(r12, r16, r13, r15); 
// stlxp w12, x16, x13, [x15] 312 313 // LoadStoreExclusiveOp 314 __ ldxpw(r18, r21, r13); // ldxp w18, w21, [x13] 315 __ ldaxpw(r11, r30, r8); // ldaxp w11, w30, [x8] 316 __ stxpw(r24, r13, r11, r1); // stxp w24, w13, w11, [x1] 317 __ stlxpw(r26, r21, r27, r13); // stlxp w26, w21, w27, [x13] 318 319 // base_plus_unscaled_offset 320 // LoadStoreOp 321 __ str(r11, Address(r20, -103)); // str x11, [x20, -103] 322 __ strw(r28, Address(r16, 62)); // str w28, [x16, 62] 323 __ strb(r27, Address(r9, -9)); // strb w27, [x9, -9] 324 __ strh(r2, Address(r25, -50)); // strh w2, [x25, -50] 325 __ ldr(r4, Address(r2, -241)); // ldr x4, [x2, -241] 326 __ ldrw(r30, Address(r20, -31)); // ldr w30, [x20, -31] 327 __ ldrb(r18, Address(r23, -23)); // ldrb w18, [x23, -23] 328 __ ldrh(r29, Address(r26, -1)); // ldrh w29, [x26, -1] 329 __ ldrsb(r1, Address(r9, 6)); // ldrsb x1, [x9, 6] 330 __ ldrsh(r11, Address(r12, 19)); // ldrsh x11, [x12, 19] 331 __ ldrshw(r11, Address(r1, -50)); // ldrsh w11, [x1, -50] 332 __ ldrsw(r19, Address(r24, 41)); // ldrsw x19, [x24, 41] 333 __ ldrd(v24, Address(r24, 95)); // ldr d24, [x24, 95] 334 __ ldrs(v15, Address(r5, -43)); // ldr s15, [x5, -43] 335 __ strd(v21, Address(r27, 1)); // str d21, [x27, 1] 336 __ strs(v23, Address(r13, -107)); // str s23, [x13, -107] 337 338 // pre 339 // LoadStoreOp 340 __ str(r11, Address(__ pre(r0, 8))); // str x11, [x0, 8]! 341 __ strw(r3, Address(__ pre(r0, 29))); // str w3, [x0, 29]! 342 __ strb(r11, Address(__ pre(r14, 9))); // strb w11, [x14, 9]! 343 __ strh(r29, Address(__ pre(r24, -3))); // strh w29, [x24, -3]! 344 __ ldr(r13, Address(__ pre(r17, -144))); // ldr x13, [x17, -144]! 345 __ ldrw(r12, Address(__ pre(r22, -6))); // ldr w12, [x22, -6]! 346 __ ldrb(r13, Address(__ pre(r12, -10))); // ldrb w13, [x12, -10]! 347 __ ldrh(r0, Address(__ pre(r21, -21))); // ldrh w0, [x21, -21]! 348 __ ldrsb(r23, Address(__ pre(r7, 4))); // ldrsb x23, [x7, 4]! 349 __ ldrsh(r3, Address(__ pre(r7, -53))); // ldrsh x3, [x7, -53]! 
350 __ ldrshw(r28, Address(__ pre(r5, -7))); // ldrsh w28, [x5, -7]! 351 __ ldrsw(r24, Address(__ pre(r9, -18))); // ldrsw x24, [x9, -18]! 352 __ ldrd(v14, Address(__ pre(r11, 12))); // ldr d14, [x11, 12]! 353 __ ldrs(v19, Address(__ pre(r12, -67))); // ldr s19, [x12, -67]! 354 __ strd(v20, Address(__ pre(r0, -253))); // str d20, [x0, -253]! 355 __ strs(v8, Address(__ pre(r0, 64))); // str s8, [x0, 64]! 356 357 // post 358 // LoadStoreOp 359 __ str(r4, Address(__ post(r28, -94))); // str x4, [x28], -94 360 __ strw(r12, Address(__ post(r7, -54))); // str w12, [x7], -54 361 __ strb(r27, Address(__ post(r10, -24))); // strb w27, [x10], -24 362 __ strh(r6, Address(__ post(r8, 27))); // strh w6, [x8], 27 363 __ ldr(r14, Address(__ post(r10, -202))); // ldr x14, [x10], -202 364 __ ldrw(r16, Address(__ post(r5, -41))); // ldr w16, [x5], -41 365 __ ldrb(r2, Address(__ post(r14, 9))); // ldrb w2, [x14], 9 366 __ ldrh(r28, Address(__ post(r13, -20))); // ldrh w28, [x13], -20 367 __ ldrsb(r9, Address(__ post(r13, -31))); // ldrsb x9, [x13], -31 368 __ ldrsh(r3, Address(__ post(r24, -36))); // ldrsh x3, [x24], -36 369 __ ldrshw(r20, Address(__ post(r3, 6))); // ldrsh w20, [x3], 6 370 __ ldrsw(r7, Address(__ post(r19, -1))); // ldrsw x7, [x19], -1 371 __ ldrd(v30, Address(__ post(r8, -130))); // ldr d30, [x8], -130 372 __ ldrs(v25, Address(__ post(r15, 21))); // ldr s25, [x15], 21 373 __ strd(v14, Address(__ post(r23, 90))); // str d14, [x23], 90 374 __ strs(v8, Address(__ post(r0, -33))); // str s8, [x0], -33 375 376 // base_plus_reg 377 // LoadStoreOp 378 __ str(r10, Address(r18, r21, Address::sxtw(3))); // str x10, [x18, w21, sxtw #3] 379 __ strw(r4, Address(r13, r22, Address::sxtw(2))); // str w4, [x13, w22, sxtw #2] 380 __ strb(r13, Address(r0, r19, Address::uxtw(0))); // strb w13, [x0, w19, uxtw #0] 381 __ strh(r12, Address(r27, r6, Address::sxtw(0))); // strh w12, [x27, w6, sxtw #0] 382 __ ldr(r0, Address(r8, r16, Address::lsl(0))); // ldr x0, [x8, x16, lsl #0] 383 __ 
ldrw(r0, Address(r4, r26, Address::sxtx(0))); // ldr w0, [x4, x26, sxtx #0] 384 __ ldrb(r14, Address(r25, r5, Address::sxtw(0))); // ldrb w14, [x25, w5, sxtw #0] 385 __ ldrh(r9, Address(r4, r18, Address::uxtw(0))); // ldrh w9, [x4, w18, uxtw #0] 386 __ ldrsb(r27, Address(r4, r7, Address::lsl(0))); // ldrsb x27, [x4, x7, lsl #0] 387 __ ldrsh(r15, Address(r17, r30, Address::sxtw(0))); // ldrsh x15, [x17, w30, sxtw #0] 388 __ ldrshw(r16, Address(r0, r22, Address::sxtw(0))); // ldrsh w16, [x0, w22, sxtw #0] 389 __ ldrsw(r22, Address(r10, r30, Address::sxtx(2))); // ldrsw x22, [x10, x30, sxtx #2] 390 __ ldrd(v29, Address(r21, r10, Address::sxtx(3))); // ldr d29, [x21, x10, sxtx #3] 391 __ ldrs(v3, Address(r11, r19, Address::uxtw(0))); // ldr s3, [x11, w19, uxtw #0] 392 __ strd(v13, Address(r28, r29, Address::uxtw(3))); // str d13, [x28, w29, uxtw #3] 393 __ strs(v23, Address(r29, r5, Address::sxtx(2))); // str s23, [x29, x5, sxtx #2] 394 395 // base_plus_scaled_offset 396 // LoadStoreOp 397 __ str(r5, Address(r8, 12600)); // str x5, [x8, 12600] 398 __ strw(r29, Address(r24, 7880)); // str w29, [x24, 7880] 399 __ strb(r19, Address(r17, 1566)); // strb w19, [x17, 1566] 400 __ strh(r13, Address(r19, 3984)); // strh w13, [x19, 3984] 401 __ ldr(r19, Address(r23, 13632)); // ldr x19, [x23, 13632] 402 __ ldrw(r23, Address(r29, 6264)); // ldr w23, [x29, 6264] 403 __ ldrb(r22, Address(r11, 2012)); // ldrb w22, [x11, 2012] 404 __ ldrh(r3, Address(r10, 3784)); // ldrh w3, [x10, 3784] 405 __ ldrsb(r8, Address(r16, 1951)); // ldrsb x8, [x16, 1951] 406 __ ldrsh(r23, Address(r20, 3346)); // ldrsh x23, [x20, 3346] 407 __ ldrshw(r2, Address(r1, 3994)); // ldrsh w2, [x1, 3994] 408 __ ldrsw(r4, Address(r17, 7204)); // ldrsw x4, [x17, 7204] 409 __ ldrd(v20, Address(r27, 14400)); // ldr d20, [x27, 14400] 410 __ ldrs(v25, Address(r14, 8096)); // ldr s25, [x14, 8096] 411 __ strd(v26, Address(r10, 15024)); // str d26, [x10, 15024] 412 __ strs(v9, Address(r3, 6936)); // str s9, [x3, 6936] 413 
414 // pcrel 415 // LoadStoreOp 416 __ ldr(r27, forth); // ldr x27, forth 417 __ ldrw(r11, __ pc()); // ldr w11, . 418 419 // LoadStoreOp 420 __ prfm(Address(r3, -187)); // prfm PLDL1KEEP, [x3, -187] 421 422 // LoadStoreOp 423 __ prfm(__ pc()); // prfm PLDL1KEEP, . 424 425 // LoadStoreOp 426 __ prfm(Address(r29, r14, Address::lsl(0))); // prfm PLDL1KEEP, [x29, x14, lsl #0] 427 428 // LoadStoreOp 429 __ prfm(Address(r4, 13312)); // prfm PLDL1KEEP, [x4, 13312] 430 431 // AddSubCarryOp 432 __ adcw(r21, r1, r7); // adc w21, w1, w7 433 __ adcsw(r8, r5, r7); // adcs w8, w5, w7 434 __ sbcw(r7, r27, r14); // sbc w7, w27, w14 435 __ sbcsw(r27, r4, r17); // sbcs w27, w4, w17 436 __ adc(r0, r28, r0); // adc x0, x28, x0 437 __ adcs(r12, r24, r30); // adcs x12, x24, x30 438 __ sbc(r0, r25, r15); // sbc x0, x25, x15 439 __ sbcs(r1, r24, r3); // sbcs x1, x24, x3 440 441 // AddSubExtendedOp 442 __ addw(r18, r24, r20, ext::uxtb, 2); // add w18, w24, w20, uxtb #2 443 __ addsw(r13, r28, r10, ext::uxth, 1); // adds w13, w28, w10, uxth #1 444 __ sub(r15, r16, r2, ext::sxth, 2); // sub x15, x16, x2, sxth #2 445 __ subsw(r29, r13, r13, ext::uxth, 2); // subs w29, w13, w13, uxth #2 446 __ add(r12, r20, r12, ext::sxtw, 3); // add x12, x20, x12, sxtw #3 447 __ adds(r30, r27, r11, ext::sxtb, 1); // adds x30, x27, x11, sxtb #1 448 __ sub(r14, r7, r1, ext::sxtw, 2); // sub x14, x7, x1, sxtw #2 449 __ subs(r29, r3, r27, ext::sxth, 1); // subs x29, x3, x27, sxth #1 450 451 // ConditionalCompareOp 452 __ ccmnw(r0, r13, 14u, Assembler::MI); // ccmn w0, w13, #14, MI 453 __ ccmpw(r22, r18, 6u, Assembler::CC); // ccmp w22, w18, #6, CC 454 __ ccmn(r18, r30, 14u, Assembler::VS); // ccmn x18, x30, #14, VS 455 __ ccmp(r10, r19, 12u, Assembler::HI); // ccmp x10, x19, #12, HI 456 457 // ConditionalCompareImmedOp 458 __ ccmnw(r6, 18, 2, Assembler::LE); // ccmn w6, #18, #2, LE 459 __ ccmpw(r9, 13, 4, Assembler::HI); // ccmp w9, #13, #4, HI 460 __ ccmn(r21, 11, 11, Assembler::LO); // ccmn x21, #11, #11, LO 
461 __ ccmp(r4, 13, 2, Assembler::VC); // ccmp x4, #13, #2, VC 462 463 // ConditionalSelectOp 464 __ cselw(r12, r2, r22, Assembler::HI); // csel w12, w2, w22, HI 465 __ csincw(r24, r16, r17, Assembler::HS); // csinc w24, w16, w17, HS 466 __ csinvw(r6, r7, r16, Assembler::LT); // csinv w6, w7, w16, LT 467 __ csnegw(r11, r27, r22, Assembler::LS); // csneg w11, w27, w22, LS 468 __ csel(r10, r3, r29, Assembler::LT); // csel x10, x3, x29, LT 469 __ csinc(r12, r26, r27, Assembler::CC); // csinc x12, x26, x27, CC 470 __ csinv(r15, r10, r21, Assembler::GT); // csinv x15, x10, x21, GT 471 __ csneg(r30, r23, r9, Assembler::GT); // csneg x30, x23, x9, GT 472 473 // TwoRegOp 474 __ rbitw(r30, r10); // rbit w30, w10 475 __ rev16w(r29, r15); // rev16 w29, w15 476 __ revw(r29, r30); // rev w29, w30 477 __ clzw(r25, r21); // clz w25, w21 478 __ clsw(r4, r0); // cls w4, w0 479 __ rbit(r18, r21); // rbit x18, x21 480 __ rev16(r29, r16); // rev16 x29, x16 481 __ rev32(r21, r20); // rev32 x21, x20 482 __ rev(r6, r19); // rev x6, x19 483 __ clz(r30, r3); // clz x30, x3 484 __ cls(r21, r19); // cls x21, x19 485 486 // ThreeRegOp 487 __ udivw(r11, r24, r0); // udiv w11, w24, w0 488 __ sdivw(r27, r25, r14); // sdiv w27, w25, w14 489 __ lslvw(r3, r14, r18); // lslv w3, w14, w18 490 __ lsrvw(r7, r15, r24); // lsrv w7, w15, w24 491 __ asrvw(r28, r17, r25); // asrv w28, w17, w25 492 __ rorvw(r2, r26, r28); // rorv w2, w26, w28 493 __ udiv(r5, r25, r26); // udiv x5, x25, x26 494 __ sdiv(r27, r16, r18); // sdiv x27, x16, x18 495 __ lslv(r6, r21, r12); // lslv x6, x21, x12 496 __ lsrv(r0, r4, r12); // lsrv x0, x4, x12 497 __ asrv(r27, r17, r28); // asrv x27, x17, x28 498 __ rorv(r28, r2, r18); // rorv x28, x2, x18 499 __ umulh(r10, r15, r14); // umulh x10, x15, x14 500 __ smulh(r14, r3, r25); // smulh x14, x3, x25 501 502 // FourRegMulOp 503 __ maddw(r15, r19, r14, r5); // madd w15, w19, w14, w5 504 __ msubw(r16, r4, r26, r25); // msub w16, w4, w26, w25 505 __ madd(r4, r2, r2, r12); // madd x4, 
x2, x2, x12 506 __ msub(r29, r17, r8, r7); // msub x29, x17, x8, x7 507 __ smaddl(r3, r4, r25, r4); // smaddl x3, w4, w25, x4 508 __ smsubl(r26, r25, r4, r17); // smsubl x26, w25, w4, x17 509 __ umaddl(r0, r26, r17, r23); // umaddl x0, w26, w17, x23 510 __ umsubl(r15, r21, r28, r17); // umsubl x15, w21, w28, x17 511 512 // ThreeRegFloatOp 513 __ fmuls(v27, v10, v3); // fmul s27, s10, s3 514 __ fdivs(v0, v7, v25); // fdiv s0, s7, s25 515 __ fadds(v9, v6, v15); // fadd s9, s6, s15 516 __ fsubs(v29, v15, v10); // fsub s29, s15, s10 517 __ fmuls(v2, v17, v7); // fmul s2, s17, s7 518 __ fmuld(v11, v11, v23); // fmul d11, d11, d23 519 __ fdivd(v7, v29, v23); // fdiv d7, d29, d23 520 __ faddd(v14, v27, v11); // fadd d14, d27, d11 521 __ fsubd(v11, v4, v24); // fsub d11, d4, d24 522 __ fmuld(v12, v15, v14); // fmul d12, d15, d14 523 524 // FourRegFloatOp 525 __ fmadds(v20, v11, v28, v13); // fmadd s20, s11, s28, s13 526 __ fmsubs(v11, v12, v23, v30); // fmsub s11, s12, s23, s30 527 __ fnmadds(v26, v14, v9, v13); // fnmadd s26, s14, s9, s13 528 __ fnmadds(v10, v7, v5, v29); // fnmadd s10, s7, s5, s29 529 __ fmaddd(v15, v3, v11, v12); // fmadd d15, d3, d11, d12 530 __ fmsubd(v15, v30, v30, v17); // fmsub d15, d30, d30, d17 531 __ fnmaddd(v19, v20, v15, v15); // fnmadd d19, d20, d15, d15 532 __ fnmaddd(v9, v21, v2, v9); // fnmadd d9, d21, d2, d9 533 534 // TwoRegFloatOp 535 __ fmovs(v27, v7); // fmov s27, s7 536 __ fabss(v29, v30); // fabs s29, s30 537 __ fnegs(v17, v1); // fneg s17, s1 538 __ fsqrts(v2, v6); // fsqrt s2, s6 539 __ fcvts(v10, v3); // fcvt d10, s3 540 __ fmovd(v24, v11); // fmov d24, d11 541 __ fabsd(v7, v1); // fabs d7, d1 542 __ fnegd(v11, v0); // fneg d11, d0 543 __ fsqrtd(v3, v18); // fsqrt d3, d18 544 __ fcvtd(v28, v6); // fcvt s28, d6 545 546 // FloatConvertOp 547 __ fcvtzsw(r22, v6); // fcvtzs w22, s6 548 __ fcvtzs(r0, v27); // fcvtzs x0, s27 549 __ fcvtzdw(r26, v2); // fcvtzs w26, d2 550 __ fcvtzd(r5, v7); // fcvtzs x5, d7 551 __ scvtfws(v28, r11); // 
scvtf s28, w11 552 __ scvtfs(v25, r13); // scvtf s25, x13 553 __ scvtfwd(v11, r23); // scvtf d11, w23 554 __ scvtfd(v19, r8); // scvtf d19, x8 555 __ fmovs(r18, v21); // fmov w18, s21 556 __ fmovd(r25, v20); // fmov x25, d20 557 __ fmovs(v19, r18); // fmov s19, w18 558 __ fmovd(v2, r29); // fmov d2, x29 559 560 // TwoRegFloatOp 561 __ fcmps(v22, v8); // fcmp s22, s8 562 __ fcmpd(v21, v19); // fcmp d21, d19 563 __ fcmps(v20, 0.0); // fcmp s20, #0.0 564 __ fcmpd(v11, 0.0); // fcmp d11, #0.0 565 566 // LoadStorePairOp 567 __ stpw(r20, r6, Address(r15, -32)); // stp w20, w6, [x15, #-32] 568 __ ldpw(r27, r14, Address(r3, -208)); // ldp w27, w14, [x3, #-208] 569 __ ldpsw(r17, r10, Address(r11, -80)); // ldpsw x17, x10, [x11, #-80] 570 __ stp(r7, r7, Address(r14, 64)); // stp x7, x7, [x14, #64] 571 __ ldp(r12, r23, Address(r0, 112)); // ldp x12, x23, [x0, #112] 572 573 // LoadStorePairOp 574 __ stpw(r13, r7, Address(__ pre(r6, -80))); // stp w13, w7, [x6, #-80]! 575 __ ldpw(r30, r16, Address(__ pre(r2, -144))); // ldp w30, w16, [x2, #-144]! 576 __ ldpsw(r4, r1, Address(__ pre(r26, -144))); // ldpsw x4, x1, [x26, #-144]! 577 __ stp(r23, r14, Address(__ pre(r11, 64))); // stp x23, x14, [x11, #64]! 578 __ ldp(r29, r27, Address(__ pre(r21, -192))); // ldp x29, x27, [x21, #-192]! 
579 580 // LoadStorePairOp 581 __ stpw(r22, r5, Address(__ post(r21, -48))); // stp w22, w5, [x21], #-48 582 __ ldpw(r27, r17, Address(__ post(r6, -32))); // ldp w27, w17, [x6], #-32 583 __ ldpsw(r17, r6, Address(__ post(r1, -80))); // ldpsw x17, x6, [x1], #-80 584 __ stp(r13, r20, Address(__ post(r21, -208))); // stp x13, x20, [x21], #-208 585 __ ldp(r30, r27, Address(__ post(r10, 80))); // ldp x30, x27, [x10], #80 586 587 // LoadStorePairOp 588 __ stnpw(r5, r17, Address(r11, 16)); // stnp w5, w17, [x11, #16] 589 __ ldnpw(r14, r4, Address(r26, -96)); // ldnp w14, w4, [x26, #-96] 590 __ stnp(r23, r29, Address(r12, 32)); // stnp x23, x29, [x12, #32] 591 __ ldnp(r0, r6, Address(r21, -80)); // ldnp x0, x6, [x21, #-80] 592 593 // LdStSIMDOp 594 __ ld1(v15, __ T8B, Address(r26)); // ld1 {v15.8B}, [x26] 595 __ ld1(v23, v24, __ T16B, Address(__ post(r11, 32))); // ld1 {v23.16B, v24.16B}, [x11], 32 596 __ ld1(v8, v9, v10, __ T1D, Address(__ post(r23, r7))); // ld1 {v8.1D, v9.1D, v10.1D}, [x23], x7 597 __ ld1(v19, v20, v21, v22, __ T8H, Address(__ post(r25, 64))); // ld1 {v19.8H, v20.8H, v21.8H, v22.8H}, [x25], 64 598 __ ld1r(v29, __ T8B, Address(r17)); // ld1r {v29.8B}, [x17] 599 __ ld1r(v24, __ T4S, Address(__ post(r23, 4))); // ld1r {v24.4S}, [x23], 4 600 __ ld1r(v10, __ T1D, Address(__ post(r5, r25))); // ld1r {v10.1D}, [x5], x25 601 __ ld2(v18, v19, __ T2D, Address(r10)); // ld2 {v18.2D, v19.2D}, [x10] 602 __ ld2(v12, v13, __ T4H, Address(__ post(r15, 16))); // ld2 {v12.4H, v13.4H}, [x15], 16 603 __ ld2r(v25, v26, __ T16B, Address(r18)); // ld2r {v25.16B, v26.16B}, [x18] 604 __ ld2r(v1, v2, __ T2S, Address(__ post(r30, 8))); // ld2r {v1.2S, v2.2S}, [x30], 8 605 __ ld2r(v16, v17, __ T2D, Address(__ post(r18, r9))); // ld2r {v16.2D, v17.2D}, [x18], x9 606 __ ld3(v25, v26, v27, __ T4S, Address(__ post(r12, r2))); // ld3 {v25.4S, v26.4S, v27.4S}, [x12], x2 607 __ ld3(v26, v27, v28, __ T2S, Address(r19)); // ld3 {v26.2S, v27.2S, v28.2S}, [x19] 608 __ ld3r(v15, v16, v17, __ 
T8H, Address(r21)); // ld3r {v15.8H, v16.8H, v17.8H}, [x21] 609 __ ld3r(v25, v26, v27, __ T4S, Address(__ post(r13, 12))); // ld3r {v25.4S, v26.4S, v27.4S}, [x13], 12 610 __ ld3r(v14, v15, v16, __ T1D, Address(__ post(r28, r29))); // ld3r {v14.1D, v15.1D, v16.1D}, [x28], x29 611 __ ld4(v17, v18, v19, v20, __ T8H, Address(__ post(r29, 64))); // ld4 {v17.8H, v18.8H, v19.8H, v20.8H}, [x29], 64 612 __ ld4(v27, v28, v29, v30, __ T8B, Address(__ post(r7, r0))); // ld4 {v27.8B, v28.8B, v29.8B, v30.8B}, [x7], x0 613 __ ld4r(v24, v25, v26, v27, __ T8B, Address(r18)); // ld4r {v24.8B, v25.8B, v26.8B, v27.8B}, [x18] 614 __ ld4r(v0, v1, v2, v3, __ T4H, Address(__ post(r26, 8))); // ld4r {v0.4H, v1.4H, v2.4H, v3.4H}, [x26], 8 615 __ ld4r(v12, v13, v14, v15, __ T2S, Address(__ post(r25, r2))); // ld4r {v12.2S, v13.2S, v14.2S, v15.2S}, [x25], x2 616 617 // SHA512SIMDOp 618 __ sha512h(v22, __ T2D, v27, v4); // sha512h q22, q27, v4.2D 619 __ sha512h2(v7, __ T2D, v6, v1); // sha512h2 q7, q6, v1.2D 620 __ sha512su0(v26, __ T2D, v15); // sha512su0 v26.2D, v15.2D 621 __ sha512su1(v2, __ T2D, v13, v13); // sha512su1 v2.2D, v13.2D, v13.2D 622 623 // SpecialCases 624 __ ccmn(zr, zr, 3u, Assembler::LE); // ccmn xzr, xzr, #3, LE 625 __ ccmnw(zr, zr, 5u, Assembler::EQ); // ccmn wzr, wzr, #5, EQ 626 __ ccmp(zr, 1, 4u, Assembler::NE); // ccmp xzr, 1, #4, NE 627 __ ccmpw(zr, 2, 2, Assembler::GT); // ccmp wzr, 2, #2, GT 628 __ extr(zr, zr, zr, 0); // extr xzr, xzr, xzr, 0 629 __ stlxp(r0, zr, zr, sp); // stlxp w0, xzr, xzr, [sp] 630 __ stlxpw(r2, zr, zr, r3); // stlxp w2, wzr, wzr, [x3] 631 __ stxp(r4, zr, zr, r5); // stxp w4, xzr, xzr, [x5] 632 __ stxpw(r6, zr, zr, sp); // stxp w6, wzr, wzr, [sp] 633 __ dup(v0, __ T16B, zr); // dup v0.16b, wzr 634 __ mov(v1, __ T1D, 0, zr); // mov v1.d[0], xzr 635 __ mov(v1, __ T2S, 1, zr); // mov v1.s[1], wzr 636 __ mov(v1, __ T4H, 2, zr); // mov v1.h[2], wzr 637 __ mov(v1, __ T8B, 3, zr); // mov v1.b[3], wzr 638 __ ld1(v31, v0, __ T2D, Address(__ post(r1, 
r0))); // ld1 {v31.2d, v0.2d}, [x1], x0 639 __ sve_cpy(z0, __ S, p0, v1); // mov z0.s, p0/m, s1 640 __ sve_inc(r0, __ S); // incw x0 641 __ sve_dec(r1, __ H); // dech x1 642 __ sve_lsl(z0, __ B, z1, 7); // lsl z0.b, z1.b, #7 643 __ sve_lsl(z21, __ H, z1, 15); // lsl z21.h, z1.h, #15 644 __ sve_lsl(z0, __ S, z1, 31); // lsl z0.s, z1.s, #31 645 __ sve_lsl(z0, __ D, z1, 63); // lsl z0.d, z1.d, #63 646 __ sve_lsr(z0, __ B, z1, 7); // lsr z0.b, z1.b, #7 647 __ sve_asr(z0, __ H, z11, 15); // asr z0.h, z11.h, #15 648 __ sve_lsr(z30, __ S, z1, 31); // lsr z30.s, z1.s, #31 649 __ sve_asr(z0, __ D, z1, 63); // asr z0.d, z1.d, #63 650 __ sve_addvl(sp, r0, 31); // addvl sp, x0, #31 651 __ sve_addpl(r1, sp, -32); // addpl x1, sp, -32 652 __ sve_cntp(r8, __ B, p0, p1); // cntp x8, p0, p1.b 653 __ sve_dup(z0, __ B, 127); // dup z0.b, 127 654 __ sve_dup(z1, __ H, -128); // dup z1.h, -128 655 __ sve_dup(z2, __ S, 32512); // dup z2.s, 32512 656 __ sve_dup(z7, __ D, -32768); // dup z7.d, -32768 657 __ sve_ld1b(z0, __ B, p0, Address(sp)); // ld1b {z0.b}, p0/z, [sp] 658 __ sve_ld1h(z10, __ H, p1, Address(sp, -8)); // ld1h {z10.h}, p1/z, [sp, #-8, MUL VL] 659 __ sve_ld1w(z20, __ S, p2, Address(r0, 7)); // ld1w {z20.s}, p2/z, [x0, #7, MUL VL] 660 __ sve_ld1b(z30, __ B, p3, Address(sp, r8)); // ld1b {z30.b}, p3/z, [sp, x8] 661 __ sve_ld1w(z0, __ S, p4, Address(sp, r28)); // ld1w {z0.s}, p4/z, [sp, x28, LSL #2] 662 __ sve_ld1d(z11, __ D, p5, Address(r0, r1)); // ld1d {z11.d}, p5/z, [x0, x1, LSL #3] 663 __ sve_st1b(z22, __ B, p6, Address(sp)); // st1b {z22.b}, p6, [sp] 664 __ sve_st1b(z31, __ B, p7, Address(sp, -8)); // st1b {z31.b}, p7, [sp, #-8, MUL VL] 665 __ sve_st1w(z0, __ S, p1, Address(r0, 7)); // st1w {z0.s}, p1, [x0, #7, MUL VL] 666 __ sve_st1b(z0, __ B, p2, Address(sp, r1)); // st1b {z0.b}, p2, [sp, x1] 667 __ sve_st1h(z0, __ H, p3, Address(sp, r8)); // st1h {z0.h}, p3, [sp, x8, LSL #1] 668 __ sve_st1d(z0, __ D, p4, Address(r0, r18)); // st1d {z0.d}, p4, [x0, x18, LSL #3] 669 __ 
sve_ldr(z0, Address(sp)); // ldr z0, [sp] 670 __ sve_ldr(z31, Address(sp, -256)); // ldr z31, [sp, #-256, MUL VL] 671 __ sve_str(z8, Address(r8, 255)); // str z8, [x8, #255, MUL VL] 672 673 // FloatImmediateOp 674 __ fmovd(v0, 2.0); // fmov d0, #2.0 675 __ fmovd(v0, 2.125); // fmov d0, #2.125 676 __ fmovd(v0, 4.0); // fmov d0, #4.0 677 __ fmovd(v0, 4.25); // fmov d0, #4.25 678 __ fmovd(v0, 8.0); // fmov d0, #8.0 679 __ fmovd(v0, 8.5); // fmov d0, #8.5 680 __ fmovd(v0, 16.0); // fmov d0, #16.0 681 __ fmovd(v0, 17.0); // fmov d0, #17.0 682 __ fmovd(v0, 0.125); // fmov d0, #0.125 683 __ fmovd(v0, 0.1328125); // fmov d0, #0.1328125 684 __ fmovd(v0, 0.25); // fmov d0, #0.25 685 __ fmovd(v0, 0.265625); // fmov d0, #0.265625 686 __ fmovd(v0, 0.5); // fmov d0, #0.5 687 __ fmovd(v0, 0.53125); // fmov d0, #0.53125 688 __ fmovd(v0, 1.0); // fmov d0, #1.0 689 __ fmovd(v0, 1.0625); // fmov d0, #1.0625 690 __ fmovd(v0, -2.0); // fmov d0, #-2.0 691 __ fmovd(v0, -2.125); // fmov d0, #-2.125 692 __ fmovd(v0, -4.0); // fmov d0, #-4.0 693 __ fmovd(v0, -4.25); // fmov d0, #-4.25 694 __ fmovd(v0, -8.0); // fmov d0, #-8.0 695 __ fmovd(v0, -8.5); // fmov d0, #-8.5 696 __ fmovd(v0, -16.0); // fmov d0, #-16.0 697 __ fmovd(v0, -17.0); // fmov d0, #-17.0 698 __ fmovd(v0, -0.125); // fmov d0, #-0.125 699 __ fmovd(v0, -0.1328125); // fmov d0, #-0.1328125 700 __ fmovd(v0, -0.25); // fmov d0, #-0.25 701 __ fmovd(v0, -0.265625); // fmov d0, #-0.265625 702 __ fmovd(v0, -0.5); // fmov d0, #-0.5 703 __ fmovd(v0, -0.53125); // fmov d0, #-0.53125 704 __ fmovd(v0, -1.0); // fmov d0, #-1.0 705 __ fmovd(v0, -1.0625); // fmov d0, #-1.0625 706 707 // LSEOp 708 __ swp(Assembler::xword, r24, r24, r4); // swp x24, x24, [x4] 709 __ ldadd(Assembler::xword, r20, r16, r0); // ldadd x20, x16, [x0] 710 __ ldbic(Assembler::xword, r4, r21, r11); // ldclr x4, x21, [x11] 711 __ ldeor(Assembler::xword, r30, r16, r22); // ldeor x30, x16, [x22] 712 __ ldorr(Assembler::xword, r4, r15, r23); // ldset x4, x15, [x23] 713 __ 
ldsmin(Assembler::xword, r26, r6, r12); // ldsmin x26, x6, [x12] 714 __ ldsmax(Assembler::xword, r15, r14, r15); // ldsmax x15, x14, [x15] 715 __ ldumin(Assembler::xword, r9, r25, r29); // ldumin x9, x25, [x29] 716 __ ldumax(Assembler::xword, r11, r20, r12); // ldumax x11, x20, [x12] 717 718 // LSEOp 719 __ swpa(Assembler::xword, r18, r22, r16); // swpa x18, x22, [x16] 720 __ ldadda(Assembler::xword, r21, r24, r26); // ldadda x21, x24, [x26] 721 __ ldbica(Assembler::xword, r6, r6, r16); // ldclra x6, x6, [x16] 722 __ ldeora(Assembler::xword, r16, r25, r16); // ldeora x16, x25, [x16] 723 __ ldorra(Assembler::xword, r28, r24, r16); // ldseta x28, x24, [x16] 724 __ ldsmina(Assembler::xword, r26, r15, r10); // ldsmina x26, x15, [x10] 725 __ ldsmaxa(Assembler::xword, r13, r14, r20); // ldsmaxa x13, x14, [x20] 726 __ ldumina(Assembler::xword, r1, r23, r30); // ldumina x1, x23, [x30] 727 __ ldumaxa(Assembler::xword, r14, r2, r6); // ldumaxa x14, x2, [x6] 728 729 // LSEOp 730 __ swpal(Assembler::xword, r3, r8, r25); // swpal x3, x8, [x25] 731 __ ldaddal(Assembler::xword, r0, r27, r30); // ldaddal x0, x27, [x30] 732 __ ldbical(Assembler::xword, r5, r5, r30); // ldclral x5, x5, [x30] 733 __ ldeoral(Assembler::xword, r11, r25, r0); // ldeoral x11, x25, [x0] 734 __ ldorral(Assembler::xword, zr, r0, r19); // ldsetal xzr, x0, [x19] 735 __ ldsminal(Assembler::xword, r29, r26, r9); // ldsminal x29, x26, [x9] 736 __ ldsmaxal(Assembler::xword, r26, r12, r15); // ldsmaxal x26, x12, [x15] 737 __ lduminal(Assembler::xword, r11, r11, r18); // lduminal x11, x11, [x18] 738 __ ldumaxal(Assembler::xword, r25, r22, r24); // ldumaxal x25, x22, [x24] 739 740 // LSEOp 741 __ swpl(Assembler::xword, r0, r17, r11); // swpl x0, x17, [x11] 742 __ ldaddl(Assembler::xword, r6, r29, r6); // ldaddl x6, x29, [x6] 743 __ ldbicl(Assembler::xword, r5, r5, r21); // ldclrl x5, x5, [x21] 744 __ ldeorl(Assembler::xword, r19, r16, r18); // ldeorl x19, x16, [x18] 745 __ ldorrl(Assembler::xword, r30, r27, r28); // 
ldsetl x30, x27, [x28] 746 __ ldsminl(Assembler::xword, r1, r28, r1); // ldsminl x1, x28, [x1] 747 __ ldsmaxl(Assembler::xword, r20, r29, r16); // ldsmaxl x20, x29, [x16] 748 __ lduminl(Assembler::xword, r13, r10, r29); // lduminl x13, x10, [x29] 749 __ ldumaxl(Assembler::xword, r29, r19, r22); // ldumaxl x29, x19, [x22] 750 751 // LSEOp 752 __ swp(Assembler::word, r10, r4, sp); // swp w10, w4, [sp] 753 __ ldadd(Assembler::word, r21, r8, sp); // ldadd w21, w8, [sp] 754 __ ldbic(Assembler::word, r19, r10, r28); // ldclr w19, w10, [x28] 755 __ ldeor(Assembler::word, r2, r25, r5); // ldeor w2, w25, [x5] 756 __ ldorr(Assembler::word, r3, r8, r22); // ldset w3, w8, [x22] 757 __ ldsmin(Assembler::word, r19, r13, r5); // ldsmin w19, w13, [x5] 758 __ ldsmax(Assembler::word, r29, r24, r21); // ldsmax w29, w24, [x21] 759 __ ldumin(Assembler::word, r26, r24, r3); // ldumin w26, w24, [x3] 760 __ ldumax(Assembler::word, r24, r26, r23); // ldumax w24, w26, [x23] 761 762 // LSEOp 763 __ swpa(Assembler::word, r15, r21, r3); // swpa w15, w21, [x3] 764 __ ldadda(Assembler::word, r24, r8, r25); // ldadda w24, w8, [x25] 765 __ ldbica(Assembler::word, r20, r16, r17); // ldclra w20, w16, [x17] 766 __ ldeora(Assembler::word, r2, r1, r0); // ldeora w2, w1, [x0] 767 __ ldorra(Assembler::word, r24, r4, r3); // ldseta w24, w4, [x3] 768 __ ldsmina(Assembler::word, r12, zr, r28); // ldsmina w12, wzr, [x28] 769 __ ldsmaxa(Assembler::word, r10, r26, r2); // ldsmaxa w10, w26, [x2] 770 __ ldumina(Assembler::word, r12, r18, sp); // ldumina w12, w18, [sp] 771 __ ldumaxa(Assembler::word, r1, r13, r29); // ldumaxa w1, w13, [x29] 772 773 // LSEOp 774 __ swpal(Assembler::word, r0, r19, r12); // swpal w0, w19, [x12] 775 __ ldaddal(Assembler::word, r17, r22, r13); // ldaddal w17, w22, [x13] 776 __ ldbical(Assembler::word, r28, r30, sp); // ldclral w28, w30, [sp] 777 __ ldeoral(Assembler::word, r1, r26, r28); // ldeoral w1, w26, [x28] 778 __ ldorral(Assembler::word, r4, r30, r4); // ldsetal w4, w30, [x4] 
779 __ ldsminal(Assembler::word, r6, r30, r26); // ldsminal w6, w30, [x26] 780 __ ldsmaxal(Assembler::word, r18, r9, r8); // ldsmaxal w18, w9, [x8] 781 __ lduminal(Assembler::word, r12, r0, r20); // lduminal w12, w0, [x20] 782 __ ldumaxal(Assembler::word, r1, r24, r2); // ldumaxal w1, w24, [x2] 783 784 // LSEOp 785 __ swpl(Assembler::word, r0, r9, r24); // swpl w0, w9, [x24] 786 __ ldaddl(Assembler::word, r26, r16, r30); // ldaddl w26, w16, [x30] 787 __ ldbicl(Assembler::word, r3, r10, r23); // ldclrl w3, w10, [x23] 788 __ ldeorl(Assembler::word, r10, r4, r18); // ldeorl w10, w4, [x18] 789 __ ldorrl(Assembler::word, r2, r11, r8); // ldsetl w2, w11, [x8] 790 __ ldsminl(Assembler::word, r10, r15, r17); // ldsminl w10, w15, [x17] 791 __ ldsmaxl(Assembler::word, r2, r10, r12); // ldsmaxl w2, w10, [x12] 792 __ lduminl(Assembler::word, r12, r15, r13); // lduminl w12, w15, [x13] 793 __ ldumaxl(Assembler::word, r2, r7, r20); // ldumaxl w2, w7, [x20] 794 795 // SVEVectorOp 796 __ sve_add(z25, __ B, z15, z4); // add z25.b, z15.b, z4.b 797 __ sve_sub(z4, __ S, z11, z17); // sub z4.s, z11.s, z17.s 798 __ sve_fadd(z16, __ D, z17, z10); // fadd z16.d, z17.d, z10.d 799 __ sve_fmul(z22, __ D, z12, z25); // fmul z22.d, z12.d, z25.d 800 __ sve_fsub(z28, __ D, z14, z10); // fsub z28.d, z14.d, z10.d 801 __ sve_abs(z1, __ H, p3, z30); // abs z1.h, p3/m, z30.h 802 __ sve_add(z15, __ B, p1, z2); // add z15.b, p1/m, z15.b, z2.b 803 __ sve_asr(z13, __ S, p4, z16); // asr z13.s, p4/m, z13.s, z16.s 804 __ sve_cnt(z3, __ D, p0, z11); // cnt z3.d, p0/m, z11.d 805 __ sve_lsl(z5, __ D, p2, z14); // lsl z5.d, p2/m, z5.d, z14.d 806 __ sve_lsr(z29, __ B, p0, z20); // lsr z29.b, p0/m, z29.b, z20.b 807 __ sve_mul(z20, __ S, p5, z27); // mul z20.s, p5/m, z20.s, z27.s 808 __ sve_neg(z26, __ B, p6, z4); // neg z26.b, p6/m, z4.b 809 __ sve_not(z22, __ B, p4, z30); // not z22.b, p4/m, z30.b 810 __ sve_smax(z11, __ H, p2, z27); // smax z11.h, p2/m, z11.h, z27.h 811 __ sve_smin(z28, __ S, p5, z30); // smin 
z28.s, p5/m, z28.s, z30.s 812 __ sve_sub(z30, __ S, p1, z13); // sub z30.s, p1/m, z30.s, z13.s 813 __ sve_fabs(z30, __ D, p4, z26); // fabs z30.d, p4/m, z26.d 814 __ sve_fadd(z15, __ S, p3, z11); // fadd z15.s, p3/m, z15.s, z11.s 815 __ sve_fdiv(z6, __ D, p7, z16); // fdiv z6.d, p7/m, z6.d, z16.d 816 __ sve_fmax(z27, __ S, p7, z7); // fmax z27.s, p7/m, z27.s, z7.s 817 __ sve_fmin(z19, __ D, p2, z4); // fmin z19.d, p2/m, z19.d, z4.d 818 __ sve_fmul(z17, __ S, p4, z22); // fmul z17.s, p4/m, z17.s, z22.s 819 __ sve_fneg(z28, __ D, p3, z21); // fneg z28.d, p3/m, z21.d 820 __ sve_frintm(z18, __ S, p5, z2); // frintm z18.s, p5/m, z2.s 821 __ sve_frintn(z6, __ S, p3, z15); // frintn z6.s, p3/m, z15.s 822 __ sve_frintp(z12, __ D, p5, z1); // frintp z12.d, p5/m, z1.d 823 __ sve_fsqrt(z18, __ S, p1, z17); // fsqrt z18.s, p1/m, z17.s 824 __ sve_fsub(z15, __ S, p5, z13); // fsub z15.s, p5/m, z15.s, z13.s 825 __ sve_fmla(z20, __ D, p7, z27, z11); // fmla z20.d, p7/m, z27.d, z11.d 826 __ sve_fmls(z3, __ D, p0, z30, z23); // fmls z3.d, p0/m, z30.d, z23.d 827 __ sve_fnmla(z17, __ S, p2, z27, z26); // fnmla z17.s, p2/m, z27.s, z26.s 828 __ sve_fnmls(z6, __ D, p5, z22, z30); // fnmls z6.d, p5/m, z22.d, z30.d 829 __ sve_mla(z2, __ H, p7, z26, z18); // mla z2.h, p7/m, z26.h, z18.h 830 __ sve_mls(z22, __ B, p4, z2, z17); // mls z22.b, p4/m, z2.b, z17.b 831 __ sve_and(z24, z25, z22); // and z24.d, z25.d, z22.d 832 __ sve_eor(z18, z12, z3); // eor z18.d, z12.d, z3.d 833 __ sve_orr(z29, z28, z16); // orr z29.d, z28.d, z16.d 834 835 // SVEReductionOp 836 __ sve_andv(v6, __ S, p2, z28); // andv s6, p2, z28.s 837 __ sve_orv(v7, __ H, p1, z7); // orv h7, p1, z7.h 838 __ sve_eorv(v9, __ B, p5, z8); // eorv b9, p5, z8.b 839 __ sve_smaxv(v27, __ B, p5, z30); // smaxv b27, p5, z30.b 840 __ sve_sminv(v26, __ H, p0, z16); // sminv h26, p0, z16.h 841 __ sve_fminv(v3, __ D, p6, z8); // fminv d3, p6, z8.d 842 __ sve_fmaxv(v21, __ D, p6, z26); // fmaxv d21, p6, z26.d 843 __ sve_fadda(v22, __ S, p0, 
z4); // fadda s22, p0, s22, z4.s 844 __ sve_uaddv(v17, __ H, p0, z3); // uaddv d17, p0, z3.h 845 846 __ bind(forth); 847 848 /* 849 aarch64ops.o: file format elf64-littleaarch64 850 851 852 Disassembly of section .text: 853 854 0000000000000000 <back>: 855 0: 8b0d82fa add x26, x23, x13, lsl #32 856 4: cb49970c sub x12, x24, x9, lsr #37 857 8: ab889dfc adds x28, x15, x8, asr #39 858 c: eb9ee787 subs x7, x28, x30, asr #57 859 10: 0b9b3ec9 add w9, w22, w27, asr #15 860 14: 4b9279a3 sub w3, w13, w18, asr #30 861 18: 2b88474e adds w14, w26, w8, asr #17 862 1c: 6b8c56c0 subs w0, w22, w12, asr #21 863 20: 8a1a51e0 and x0, x15, x26, lsl #20 864 24: aa11f4ba orr x26, x5, x17, lsl #61 865 28: ca0281b8 eor x24, x13, x2, lsl #32 866 2c: ea918c7c ands x28, x3, x17, asr #35 867 30: 0a5d4a19 and w25, w16, w29, lsr #18 868 34: 2a4b264d orr w13, w18, w11, lsr #9 869 38: 4a523ca5 eor w5, w5, w18, lsr #15 870 3c: 6a9b6ae2 ands w2, w23, w27, asr #26 871 40: 8a70b79b bic x27, x28, x16, lsr #45 872 44: aaba9728 orn x8, x25, x26, asr #37 873 48: ca6dfe3d eon x29, x17, x13, lsr #63 874 4c: ea627f1c bics x28, x24, x2, lsr #31 875 50: 0aa70f53 bic w19, w26, w7, asr #3 876 54: 2aaa0f06 orn w6, w24, w10, asr #3 877 58: 4a6176a4 eon w4, w21, w1, lsr #29 878 5c: 6a604eb0 bics w16, w21, w0, lsr #19 879 60: 1105ed91 add w17, w12, #0x17b 880 64: 3100583e adds w30, w1, #0x16 881 68: 5101f8bd sub w29, w5, #0x7e 882 6c: 710f0306 subs w6, w24, #0x3c0 883 70: 9101a1a0 add x0, x13, #0x68 884 74: b10a5cc8 adds x8, x6, #0x297 885 78: d10810aa sub x10, x5, #0x204 886 7c: f10fd061 subs x1, x3, #0x3f4 887 80: 120cb166 and w6, w11, #0xfff1fff1 888 84: 321764bc orr w28, w5, #0xfffffe07 889 88: 52174681 eor w1, w20, #0x7fffe00 890 8c: 720c0247 ands w7, w18, #0x100000 891 90: 9241018e and x14, x12, #0x8000000000000000 892 94: b25a2969 orr x9, x11, #0x1ffc000000000 893 98: d278b411 eor x17, x0, #0x3fffffffffff00 894 9c: f26aad01 ands x1, x8, #0xffffffffffc00003 895 a0: 14000000 b a0 <back+0xa0> 896 a4: 17ffffd7 b 
0 <back> 897 a8: 14000242 b 9b0 <forth> 898 ac: 94000000 bl ac <back+0xac> 899 b0: 97ffffd4 bl 0 <back> 900 b4: 9400023f bl 9b0 <forth> 901 b8: 3400000a cbz w10, b8 <back+0xb8> 902 bc: 34fffa2a cbz w10, 0 <back> 903 c0: 3400478a cbz w10, 9b0 <forth> 904 c4: 35000008 cbnz w8, c4 <back+0xc4> 905 c8: 35fff9c8 cbnz w8, 0 <back> 906 cc: 35004728 cbnz w8, 9b0 <forth> 907 d0: b400000b cbz x11, d0 <back+0xd0> 908 d4: b4fff96b cbz x11, 0 <back> 909 d8: b40046cb cbz x11, 9b0 <forth> 910 dc: b500001d cbnz x29, dc <back+0xdc> 911 e0: b5fff91d cbnz x29, 0 <back> 912 e4: b500467d cbnz x29, 9b0 <forth> 913 e8: 10000013 adr x19, e8 <back+0xe8> 914 ec: 10fff8b3 adr x19, 0 <back> 915 f0: 10004613 adr x19, 9b0 <forth> 916 f4: 90000013 adrp x19, 0 <back> 917 f8: 36300016 tbz w22, #6, f8 <back+0xf8> 918 fc: 3637f836 tbz w22, #6, 0 <back> 919 100: 36304596 tbz w22, #6, 9b0 <forth> 920 104: 3758000c tbnz w12, #11, 104 <back+0x104> 921 108: 375ff7cc tbnz w12, #11, 0 <back> 922 10c: 3758452c tbnz w12, #11, 9b0 <forth> 923 110: 128313a0 mov w0, #0xffffe762 // #-6302 924 114: 528a32c7 mov w7, #0x5196 // #20886 925 118: 7289173b movk w27, #0x48b9 926 11c: 92ab3acc mov x12, #0xffffffffa629ffff // #-1507196929 927 120: d2a0bf94 mov x20, #0x5fc0000 // #100401152 928 124: f2c285e8 movk x8, #0x142f, lsl #32 929 128: 9358722f sbfx x15, x17, #24, #5 930 12c: 330e652f bfxil w15, w9, #14, #12 931 130: 53067f3b lsr w27, w25, #6 932 134: 93577c53 sbfx x19, x2, #23, #9 933 138: b34a1aac bfi x12, x21, #54, #7 934 13c: d35a4016 ubfiz x22, x0, #38, #17 935 140: 13946c63 extr w3, w3, w20, #27 936 144: 93c3dbc8 extr x8, x30, x3, #54 937 148: 54000000 b.eq 148 <back+0x148> // b.none 938 14c: 54fff5a0 b.eq 0 <back> // b.none 939 150: 54004300 b.eq 9b0 <forth> // b.none 940 154: 54000001 b.ne 154 <back+0x154> // b.any 941 158: 54fff541 b.ne 0 <back> // b.any 942 15c: 540042a1 b.ne 9b0 <forth> // b.any 943 160: 54000002 b.cs 160 <back+0x160> // b.hs, b.nlast 944 164: 54fff4e2 b.cs 0 <back> // b.hs, b.nlast 945 
168: 54004242 b.cs 9b0 <forth> // b.hs, b.nlast 946 16c: 54000002 b.cs 16c <back+0x16c> // b.hs, b.nlast 947 170: 54fff482 b.cs 0 <back> // b.hs, b.nlast 948 174: 540041e2 b.cs 9b0 <forth> // b.hs, b.nlast 949 178: 54000003 b.cc 178 <back+0x178> // b.lo, b.ul, b.last 950 17c: 54fff423 b.cc 0 <back> // b.lo, b.ul, b.last 951 180: 54004183 b.cc 9b0 <forth> // b.lo, b.ul, b.last 952 184: 54000003 b.cc 184 <back+0x184> // b.lo, b.ul, b.last 953 188: 54fff3c3 b.cc 0 <back> // b.lo, b.ul, b.last 954 18c: 54004123 b.cc 9b0 <forth> // b.lo, b.ul, b.last 955 190: 54000004 b.mi 190 <back+0x190> // b.first 956 194: 54fff364 b.mi 0 <back> // b.first 957 198: 540040c4 b.mi 9b0 <forth> // b.first 958 19c: 54000005 b.pl 19c <back+0x19c> // b.nfrst 959 1a0: 54fff305 b.pl 0 <back> // b.nfrst 960 1a4: 54004065 b.pl 9b0 <forth> // b.nfrst 961 1a8: 54000006 b.vs 1a8 <back+0x1a8> 962 1ac: 54fff2a6 b.vs 0 <back> 963 1b0: 54004006 b.vs 9b0 <forth> 964 1b4: 54000007 b.vc 1b4 <back+0x1b4> 965 1b8: 54fff247 b.vc 0 <back> 966 1bc: 54003fa7 b.vc 9b0 <forth> 967 1c0: 54000008 b.hi 1c0 <back+0x1c0> // b.pmore 968 1c4: 54fff1e8 b.hi 0 <back> // b.pmore 969 1c8: 54003f48 b.hi 9b0 <forth> // b.pmore 970 1cc: 54000009 b.ls 1cc <back+0x1cc> // b.plast 971 1d0: 54fff189 b.ls 0 <back> // b.plast 972 1d4: 54003ee9 b.ls 9b0 <forth> // b.plast 973 1d8: 5400000a b.ge 1d8 <back+0x1d8> // b.tcont 974 1dc: 54fff12a b.ge 0 <back> // b.tcont 975 1e0: 54003e8a b.ge 9b0 <forth> // b.tcont 976 1e4: 5400000b b.lt 1e4 <back+0x1e4> // b.tstop 977 1e8: 54fff0cb b.lt 0 <back> // b.tstop 978 1ec: 54003e2b b.lt 9b0 <forth> // b.tstop 979 1f0: 5400000c b.gt 1f0 <back+0x1f0> 980 1f4: 54fff06c b.gt 0 <back> 981 1f8: 54003dcc b.gt 9b0 <forth> 982 1fc: 5400000d b.le 1fc <back+0x1fc> 983 200: 54fff00d b.le 0 <back> 984 204: 54003d6d b.le 9b0 <forth> 985 208: 5400000e b.al 208 <back+0x208> 986 20c: 54ffefae b.al 0 <back> 987 210: 54003d0e b.al 9b0 <forth> 988 214: 5400000f b.nv 214 <back+0x214> 989 218: 54ffef4f b.nv 0 <back> 
990 21c: 54003caf b.nv 9b0 <forth> 991 220: d40658e1 svc #0x32c7 992 224: d4014d22 hvc #0xa69 993 228: d4046543 smc #0x232a 994 22c: d4273f60 brk #0x39fb 995 230: d44cad80 hlt #0x656c 996 234: d503201f nop 997 238: d69f03e0 eret 998 23c: d6bf03e0 drps 999 240: d5033fdf isb 1000 244: d5033e9f dsb st 1001 248: d50332bf dmb oshst 1002 24c: d61f0200 br x16 1003 250: d63f0280 blr x20 1004 254: c80a7d1b stxr w10, x27, [x8] 1005 258: c800fea1 stlxr w0, x1, [x21] 1006 25c: c85f7fb1 ldxr x17, [x29] 1007 260: c85fff9d ldaxr x29, [x28] 1008 264: c89ffee1 stlr x1, [x23] 1009 268: c8dffe95 ldar x21, [x20] 1010 26c: 88167e7b stxr w22, w27, [x19] 1011 270: 880bfcd0 stlxr w11, w16, [x6] 1012 274: 885f7c12 ldxr w18, [x0] 1013 278: 885ffd44 ldaxr w4, [x10] 1014 27c: 889ffed8 stlr w24, [x22] 1015 280: 88dffe6a ldar w10, [x19] 1016 284: 48017fc5 stxrh w1, w5, [x30] 1017 288: 4808fe2c stlxrh w8, w12, [x17] 1018 28c: 485f7dc9 ldxrh w9, [x14] 1019 290: 485ffc27 ldaxrh w7, [x1] 1020 294: 489ffe05 stlrh w5, [x16] 1021 298: 48dffd82 ldarh w2, [x12] 1022 29c: 080a7c6c stxrb w10, w12, [x3] 1023 2a0: 081cff4e stlxrb w28, w14, [x26] 1024 2a4: 085f7d5e ldxrb w30, [x10] 1025 2a8: 085ffeae ldaxrb w14, [x21] 1026 2ac: 089ffd2d stlrb w13, [x9] 1027 2b0: 08dfff76 ldarb w22, [x27] 1028 2b4: c87f4d7c ldxp x28, x19, [x11] 1029 2b8: c87fcc5e ldaxp x30, x19, [x2] 1030 2bc: c8220417 stxp w2, x23, x1, [x0] 1031 2c0: c82cb5f0 stlxp w12, x16, x13, [x15] 1032 2c4: 887f55b2 ldxp w18, w21, [x13] 1033 2c8: 887ff90b ldaxp w11, w30, [x8] 1034 2cc: 88382c2d stxp w24, w13, w11, [x1] 1035 2d0: 883aedb5 stlxp w26, w21, w27, [x13] 1036 2d4: f819928b stur x11, [x20, #-103] 1037 2d8: b803e21c stur w28, [x16, #62] 1038 2dc: 381f713b sturb w27, [x9, #-9] 1039 2e0: 781ce322 sturh w2, [x25, #-50] 1040 2e4: f850f044 ldur x4, [x2, #-241] 1041 2e8: b85e129e ldur w30, [x20, #-31] 1042 2ec: 385e92f2 ldurb w18, [x23, #-23] 1043 2f0: 785ff35d ldurh w29, [x26, #-1] 1044 2f4: 39801921 ldrsb x1, [x9, #6] 1045 2f8: 7881318b ldursh x11, 
[x12, #19] 1046 2fc: 78dce02b ldursh w11, [x1, #-50] 1047 300: b8829313 ldursw x19, [x24, #41] 1048 304: fc45f318 ldur d24, [x24, #95] 1049 308: bc5d50af ldur s15, [x5, #-43] 1050 30c: fc001375 stur d21, [x27, #1] 1051 310: bc1951b7 stur s23, [x13, #-107] 1052 314: f8008c0b str x11, [x0, #8]! 1053 318: b801dc03 str w3, [x0, #29]! 1054 31c: 38009dcb strb w11, [x14, #9]! 1055 320: 781fdf1d strh w29, [x24, #-3]! 1056 324: f8570e2d ldr x13, [x17, #-144]! 1057 328: b85faecc ldr w12, [x22, #-6]! 1058 32c: 385f6d8d ldrb w13, [x12, #-10]! 1059 330: 785ebea0 ldrh w0, [x21, #-21]! 1060 334: 38804cf7 ldrsb x23, [x7, #4]! 1061 338: 789cbce3 ldrsh x3, [x7, #-53]! 1062 33c: 78df9cbc ldrsh w28, [x5, #-7]! 1063 340: b89eed38 ldrsw x24, [x9, #-18]! 1064 344: fc40cd6e ldr d14, [x11, #12]! 1065 348: bc5bdd93 ldr s19, [x12, #-67]! 1066 34c: fc103c14 str d20, [x0, #-253]! 1067 350: bc040c08 str s8, [x0, #64]! 1068 354: f81a2784 str x4, [x28], #-94 1069 358: b81ca4ec str w12, [x7], #-54 1070 35c: 381e855b strb w27, [x10], #-24 1071 360: 7801b506 strh w6, [x8], #27 1072 364: f853654e ldr x14, [x10], #-202 1073 368: b85d74b0 ldr w16, [x5], #-41 1074 36c: 384095c2 ldrb w2, [x14], #9 1075 370: 785ec5bc ldrh w28, [x13], #-20 1076 374: 389e15a9 ldrsb x9, [x13], #-31 1077 378: 789dc703 ldrsh x3, [x24], #-36 1078 37c: 78c06474 ldrsh w20, [x3], #6 1079 380: b89ff667 ldrsw x7, [x19], #-1 1080 384: fc57e51e ldr d30, [x8], #-130 1081 388: bc4155f9 ldr s25, [x15], #21 1082 38c: fc05a6ee str d14, [x23], #90 1083 390: bc1df408 str s8, [x0], #-33 1084 394: f835da4a str x10, [x18, w21, sxtw #3] 1085 398: b836d9a4 str w4, [x13, w22, sxtw #2] 1086 39c: 3833580d strb w13, [x0, w19, uxtw #0] 1087 3a0: 7826cb6c strh w12, [x27, w6, sxtw] 1088 3a4: f8706900 ldr x0, [x8, x16] 1089 3a8: b87ae880 ldr w0, [x4, x26, sxtx] 1090 3ac: 3865db2e ldrb w14, [x25, w5, sxtw #0] 1091 3b0: 78724889 ldrh w9, [x4, w18, uxtw] 1092 3b4: 38a7789b ldrsb x27, [x4, x7, lsl #0] 1093 3b8: 78beca2f ldrsh x15, [x17, w30, sxtw] 1094 3bc: 
78f6c810 ldrsh w16, [x0, w22, sxtw] 1095 3c0: b8bef956 ldrsw x22, [x10, x30, sxtx #2] 1096 3c4: fc6afabd ldr d29, [x21, x10, sxtx #3] 1097 3c8: bc734963 ldr s3, [x11, w19, uxtw] 1098 3cc: fc3d5b8d str d13, [x28, w29, uxtw #3] 1099 3d0: bc25fbb7 str s23, [x29, x5, sxtx #2] 1100 3d4: f9189d05 str x5, [x8, #12600] 1101 3d8: b91ecb1d str w29, [x24, #7880] 1102 3dc: 39187a33 strb w19, [x17, #1566] 1103 3e0: 791f226d strh w13, [x19, #3984] 1104 3e4: f95aa2f3 ldr x19, [x23, #13632] 1105 3e8: b9587bb7 ldr w23, [x29, #6264] 1106 3ec: 395f7176 ldrb w22, [x11, #2012] 1107 3f0: 795d9143 ldrh w3, [x10, #3784] 1108 3f4: 399e7e08 ldrsb x8, [x16, #1951] 1109 3f8: 799a2697 ldrsh x23, [x20, #3346] 1110 3fc: 79df3422 ldrsh w2, [x1, #3994] 1111 400: b99c2624 ldrsw x4, [x17, #7204] 1112 404: fd5c2374 ldr d20, [x27, #14400] 1113 408: bd5fa1d9 ldr s25, [x14, #8096] 1114 40c: fd1d595a str d26, [x10, #15024] 1115 410: bd1b1869 str s9, [x3, #6936] 1116 414: 58002cfb ldr x27, 9b0 <forth> 1117 418: 1800000b ldr w11, 418 <back+0x418> 1118 41c: f8945060 prfum pldl1keep, [x3, #-187] 1119 420: d8000000 prfm pldl1keep, 420 <back+0x420> 1120 424: f8ae6ba0 prfm pldl1keep, [x29, x14] 1121 428: f99a0080 prfm pldl1keep, [x4, #13312] 1122 42c: 1a070035 adc w21, w1, w7 1123 430: 3a0700a8 adcs w8, w5, w7 1124 434: 5a0e0367 sbc w7, w27, w14 1125 438: 7a11009b sbcs w27, w4, w17 1126 43c: 9a000380 adc x0, x28, x0 1127 440: ba1e030c adcs x12, x24, x30 1128 444: da0f0320 sbc x0, x25, x15 1129 448: fa030301 sbcs x1, x24, x3 1130 44c: 0b340b12 add w18, w24, w20, uxtb #2 1131 450: 2b2a278d adds w13, w28, w10, uxth #1 1132 454: cb22aa0f sub x15, x16, w2, sxth #2 1133 458: 6b2d29bd subs w29, w13, w13, uxth #2 1134 45c: 8b2cce8c add x12, x20, w12, sxtw #3 1135 460: ab2b877e adds x30, x27, w11, sxtb #1 1136 464: cb21c8ee sub x14, x7, w1, sxtw #2 1137 468: eb3ba47d subs x29, x3, w27, sxth #1 1138 46c: 3a4d400e ccmn w0, w13, #0xe, mi // mi = first 1139 470: 7a5232c6 ccmp w22, w18, #0x6, cc // cc = lo, ul, last 1140 
474: ba5e624e ccmn x18, x30, #0xe, vs 1141 478: fa53814c ccmp x10, x19, #0xc, hi // hi = pmore 1142 47c: 3a52d8c2 ccmn w6, #0x12, #0x2, le 1143 480: 7a4d8924 ccmp w9, #0xd, #0x4, hi // hi = pmore 1144 484: ba4b3aab ccmn x21, #0xb, #0xb, cc // cc = lo, ul, last 1145 488: fa4d7882 ccmp x4, #0xd, #0x2, vc 1146 48c: 1a96804c csel w12, w2, w22, hi // hi = pmore 1147 490: 1a912618 csinc w24, w16, w17, cs // cs = hs, nlast 1148 494: 5a90b0e6 csinv w6, w7, w16, lt // lt = tstop 1149 498: 5a96976b csneg w11, w27, w22, ls // ls = plast 1150 49c: 9a9db06a csel x10, x3, x29, lt // lt = tstop 1151 4a0: 9a9b374c csinc x12, x26, x27, cc // cc = lo, ul, last 1152 4a4: da95c14f csinv x15, x10, x21, gt 1153 4a8: da89c6fe csneg x30, x23, x9, gt 1154 4ac: 5ac0015e rbit w30, w10 1155 4b0: 5ac005fd rev16 w29, w15 1156 4b4: 5ac00bdd rev w29, w30 1157 4b8: 5ac012b9 clz w25, w21 1158 4bc: 5ac01404 cls w4, w0 1159 4c0: dac002b2 rbit x18, x21 1160 4c4: dac0061d rev16 x29, x16 1161 4c8: dac00a95 rev32 x21, x20 1162 4cc: dac00e66 rev x6, x19 1163 4d0: dac0107e clz x30, x3 1164 4d4: dac01675 cls x21, x19 1165 4d8: 1ac00b0b udiv w11, w24, w0 1166 4dc: 1ace0f3b sdiv w27, w25, w14 1167 4e0: 1ad221c3 lsl w3, w14, w18 1168 4e4: 1ad825e7 lsr w7, w15, w24 1169 4e8: 1ad92a3c asr w28, w17, w25 1170 4ec: 1adc2f42 ror w2, w26, w28 1171 4f0: 9ada0b25 udiv x5, x25, x26 1172 4f4: 9ad20e1b sdiv x27, x16, x18 1173 4f8: 9acc22a6 lsl x6, x21, x12 1174 4fc: 9acc2480 lsr x0, x4, x12 1175 500: 9adc2a3b asr x27, x17, x28 1176 504: 9ad22c5c ror x28, x2, x18 1177 508: 9bce7dea umulh x10, x15, x14 1178 50c: 9b597c6e smulh x14, x3, x25 1179 510: 1b0e166f madd w15, w19, w14, w5 1180 514: 1b1ae490 msub w16, w4, w26, w25 1181 518: 9b023044 madd x4, x2, x2, x12 1182 51c: 9b089e3d msub x29, x17, x8, x7 1183 520: 9b391083 smaddl x3, w4, w25, x4 1184 524: 9b24c73a smsubl x26, w25, w4, x17 1185 528: 9bb15f40 umaddl x0, w26, w17, x23 1186 52c: 9bbcc6af umsubl x15, w21, w28, x17 1187 530: 1e23095b fmul s27, s10, s3 1188 534: 
1e3918e0 fdiv s0, s7, s25 1189 538: 1e2f28c9 fadd s9, s6, s15 1190 53c: 1e2a39fd fsub s29, s15, s10 1191 540: 1e270a22 fmul s2, s17, s7 1192 544: 1e77096b fmul d11, d11, d23 1193 548: 1e771ba7 fdiv d7, d29, d23 1194 54c: 1e6b2b6e fadd d14, d27, d11 1195 550: 1e78388b fsub d11, d4, d24 1196 554: 1e6e09ec fmul d12, d15, d14 1197 558: 1f1c3574 fmadd s20, s11, s28, s13 1198 55c: 1f17f98b fmsub s11, s12, s23, s30 1199 560: 1f2935da fnmadd s26, s14, s9, s13 1200 564: 1f2574ea fnmadd s10, s7, s5, s29 1201 568: 1f4b306f fmadd d15, d3, d11, d12 1202 56c: 1f5ec7cf fmsub d15, d30, d30, d17 1203 570: 1f6f3e93 fnmadd d19, d20, d15, d15 1204 574: 1f6226a9 fnmadd d9, d21, d2, d9 1205 578: 1e2040fb fmov s27, s7 1206 57c: 1e20c3dd fabs s29, s30 1207 580: 1e214031 fneg s17, s1 1208 584: 1e21c0c2 fsqrt s2, s6 1209 588: 1e22c06a fcvt d10, s3 1210 58c: 1e604178 fmov d24, d11 1211 590: 1e60c027 fabs d7, d1 1212 594: 1e61400b fneg d11, d0 1213 598: 1e61c243 fsqrt d3, d18 1214 59c: 1e6240dc fcvt s28, d6 1215 5a0: 1e3800d6 fcvtzs w22, s6 1216 5a4: 9e380360 fcvtzs x0, s27 1217 5a8: 1e78005a fcvtzs w26, d2 1218 5ac: 9e7800e5 fcvtzs x5, d7 1219 5b0: 1e22017c scvtf s28, w11 1220 5b4: 9e2201b9 scvtf s25, x13 1221 5b8: 1e6202eb scvtf d11, w23 1222 5bc: 9e620113 scvtf d19, x8 1223 5c0: 1e2602b2 fmov w18, s21 1224 5c4: 9e660299 fmov x25, d20 1225 5c8: 1e270253 fmov s19, w18 1226 5cc: 9e6703a2 fmov d2, x29 1227 5d0: 1e2822c0 fcmp s22, s8 1228 5d4: 1e7322a0 fcmp d21, d19 1229 5d8: 1e202288 fcmp s20, #0.0 1230 5dc: 1e602168 fcmp d11, #0.0 1231 5e0: 293c19f4 stp w20, w6, [x15, #-32] 1232 5e4: 2966387b ldp w27, w14, [x3, #-208] 1233 5e8: 69762971 ldpsw x17, x10, [x11, #-80] 1234 5ec: a9041dc7 stp x7, x7, [x14, #64] 1235 5f0: a9475c0c ldp x12, x23, [x0, #112] 1236 5f4: 29b61ccd stp w13, w7, [x6, #-80]! 1237 5f8: 29ee405e ldp w30, w16, [x2, #-144]! 1238 5fc: 69ee0744 ldpsw x4, x1, [x26, #-144]! 1239 600: a9843977 stp x23, x14, [x11, #64]! 1240 604: a9f46ebd ldp x29, x27, [x21, #-192]! 
1241 608: 28ba16b6 stp w22, w5, [x21], #-48 1242 60c: 28fc44db ldp w27, w17, [x6], #-32 1243 610: 68f61831 ldpsw x17, x6, [x1], #-80 1244 614: a8b352ad stp x13, x20, [x21], #-208 1245 618: a8c56d5e ldp x30, x27, [x10], #80 1246 61c: 28024565 stnp w5, w17, [x11, #16] 1247 620: 2874134e ldnp w14, w4, [x26, #-96] 1248 624: a8027597 stnp x23, x29, [x12, #32] 1249 628: a87b1aa0 ldnp x0, x6, [x21, #-80] 1250 62c: 0c40734f ld1 {v15.8b}, [x26] 1251 630: 4cdfa177 ld1 {v23.16b, v24.16b}, [x11], #32 1252 634: 0cc76ee8 ld1 {v8.1d-v10.1d}, [x23], x7 1253 638: 4cdf2733 ld1 {v19.8h-v22.8h}, [x25], #64 1254 63c: 0d40c23d ld1r {v29.8b}, [x17] 1255 640: 4ddfcaf8 ld1r {v24.4s}, [x23], #4 1256 644: 0dd9ccaa ld1r {v10.1d}, [x5], x25 1257 648: 4c408d52 ld2 {v18.2d, v19.2d}, [x10] 1258 64c: 0cdf85ec ld2 {v12.4h, v13.4h}, [x15], #16 1259 650: 4d60c259 ld2r {v25.16b, v26.16b}, [x18] 1260 654: 0dffcbc1 ld2r {v1.2s, v2.2s}, [x30], #8 1261 658: 4de9ce50 ld2r {v16.2d, v17.2d}, [x18], x9 1262 65c: 4cc24999 ld3 {v25.4s-v27.4s}, [x12], x2 1263 660: 0c404a7a ld3 {v26.2s-v28.2s}, [x19] 1264 664: 4d40e6af ld3r {v15.8h-v17.8h}, [x21] 1265 668: 4ddfe9b9 ld3r {v25.4s-v27.4s}, [x13], #12 1266 66c: 0dddef8e ld3r {v14.1d-v16.1d}, [x28], x29 1267 670: 4cdf07b1 ld4 {v17.8h-v20.8h}, [x29], #64 1268 674: 0cc000fb ld4 {v27.8b-v30.8b}, [x7], x0 1269 678: 0d60e258 ld4r {v24.8b-v27.8b}, [x18] 1270 67c: 0dffe740 ld4r {v0.4h-v3.4h}, [x26], #8 1271 680: 0de2eb2c ld4r {v12.2s-v15.2s}, [x25], x2 1272 684: ce648376 sha512h q22, q27, v4.2d 1273 688: ce6184c7 sha512h2 q7, q6, v1.2d 1274 68c: cec081fa sha512su0 v26.2d, v15.2d 1275 690: ce6d89a2 sha512su1 v2.2d, v13.2d, v13.2d 1276 694: ba5fd3e3 ccmn xzr, xzr, #0x3, le 1277 698: 3a5f03e5 ccmn wzr, wzr, #0x5, eq // eq = none 1278 69c: fa411be4 ccmp xzr, #0x1, #0x4, ne // ne = any 1279 6a0: 7a42cbe2 ccmp wzr, #0x2, #0x2, gt 1280 6a4: 93df03ff ror xzr, xzr, #0 1281 6a8: c820ffff stlxp w0, xzr, xzr, [sp] 1282 6ac: 8822fc7f stlxp w2, wzr, wzr, [x3] 1283 6b0: c8247cbf stxp w4, 
xzr, xzr, [x5] 1284 6b4: 88267fff stxp w6, wzr, wzr, [sp] 1285 6b8: 4e010fe0 dup v0.16b, wzr 1286 6bc: 4e081fe1 mov v1.d[0], xzr 1287 6c0: 4e0c1fe1 mov v1.s[1], wzr 1288 6c4: 4e0a1fe1 mov v1.h[2], wzr 1289 6c8: 4e071fe1 mov v1.b[3], wzr 1290 6cc: 4cc0ac3f ld1 {v31.2d, v0.2d}, [x1], x0 1291 6d0: 05a08020 mov z0.s, p0/m, s1 1292 6d4: 04b0e3e0 incw x0 1293 6d8: 0470e7e1 dech x1 1294 6dc: 042f9c20 lsl z0.b, z1.b, #7 1295 6e0: 043f9c35 lsl z21.h, z1.h, #15 1296 6e4: 047f9c20 lsl z0.s, z1.s, #31 1297 6e8: 04ff9c20 lsl z0.d, z1.d, #63 1298 6ec: 04299420 lsr z0.b, z1.b, #7 1299 6f0: 04319160 asr z0.h, z11.h, #15 1300 6f4: 0461943e lsr z30.s, z1.s, #31 1301 6f8: 04a19020 asr z0.d, z1.d, #63 1302 6fc: 042053ff addvl sp, x0, #31 1303 700: 047f5401 addpl x1, sp, #-32 1304 704: 25208028 cntp x8, p0, p1.b 1305 708: 2538cfe0 mov z0.b, #127 1306 70c: 2578d001 mov z1.h, #-128 1307 710: 25b8efe2 mov z2.s, #32512 1308 714: 25f8f007 mov z7.d, #-32768 1309 718: a400a3e0 ld1b {z0.b}, p0/z, [sp] 1310 71c: a4a8a7ea ld1h {z10.h}, p1/z, [sp, #-8, mul vl] 1311 720: a547a814 ld1w {z20.s}, p2/z, [x0, #7, mul vl] 1312 724: a4084ffe ld1b {z30.b}, p3/z, [sp, x8] 1313 728: a55c53e0 ld1w {z0.s}, p4/z, [sp, x28, lsl #2] 1314 72c: a5e1540b ld1d {z11.d}, p5/z, [x0, x1, lsl #3] 1315 730: e400fbf6 st1b {z22.b}, p6, [sp] 1316 734: e408ffff st1b {z31.b}, p7, [sp, #-8, mul vl] 1317 738: e547e400 st1w {z0.s}, p1, [x0, #7, mul vl] 1318 73c: e4014be0 st1b {z0.b}, p2, [sp, x1] 1319 740: e4a84fe0 st1h {z0.h}, p3, [sp, x8, lsl #1] 1320 744: e5f25000 st1d {z0.d}, p4, [x0, x18, lsl #3] 1321 748: 858043e0 ldr z0, [sp] 1322 74c: 85a043ff ldr z31, [sp, #-256, mul vl] 1323 750: e59f5d08 str z8, [x8, #255, mul vl] 1324 754: 1e601000 fmov d0, #2.000000000000000000e+00 1325 758: 1e603000 fmov d0, #2.125000000000000000e+00 1326 75c: 1e621000 fmov d0, #4.000000000000000000e+00 1327 760: 1e623000 fmov d0, #4.250000000000000000e+00 1328 764: 1e641000 fmov d0, #8.000000000000000000e+00 1329 768: 1e643000 fmov d0, 
#8.500000000000000000e+00 1330 76c: 1e661000 fmov d0, #1.600000000000000000e+01 1331 770: 1e663000 fmov d0, #1.700000000000000000e+01 1332 774: 1e681000 fmov d0, #1.250000000000000000e-01 1333 778: 1e683000 fmov d0, #1.328125000000000000e-01 1334 77c: 1e6a1000 fmov d0, #2.500000000000000000e-01 1335 780: 1e6a3000 fmov d0, #2.656250000000000000e-01 1336 784: 1e6c1000 fmov d0, #5.000000000000000000e-01 1337 788: 1e6c3000 fmov d0, #5.312500000000000000e-01 1338 78c: 1e6e1000 fmov d0, #1.000000000000000000e+00 1339 790: 1e6e3000 fmov d0, #1.062500000000000000e+00 1340 794: 1e701000 fmov d0, #-2.000000000000000000e+00 1341 798: 1e703000 fmov d0, #-2.125000000000000000e+00 1342 79c: 1e721000 fmov d0, #-4.000000000000000000e+00 1343 7a0: 1e723000 fmov d0, #-4.250000000000000000e+00 1344 7a4: 1e741000 fmov d0, #-8.000000000000000000e+00 1345 7a8: 1e743000 fmov d0, #-8.500000000000000000e+00 1346 7ac: 1e761000 fmov d0, #-1.600000000000000000e+01 1347 7b0: 1e763000 fmov d0, #-1.700000000000000000e+01 1348 7b4: 1e781000 fmov d0, #-1.250000000000000000e-01 1349 7b8: 1e783000 fmov d0, #-1.328125000000000000e-01 1350 7bc: 1e7a1000 fmov d0, #-2.500000000000000000e-01 1351 7c0: 1e7a3000 fmov d0, #-2.656250000000000000e-01 1352 7c4: 1e7c1000 fmov d0, #-5.000000000000000000e-01 1353 7c8: 1e7c3000 fmov d0, #-5.312500000000000000e-01 1354 7cc: 1e7e1000 fmov d0, #-1.000000000000000000e+00 1355 7d0: 1e7e3000 fmov d0, #-1.062500000000000000e+00 1356 7d4: f8388098 swp x24, x24, [x4] 1357 7d8: f8340010 ldadd x20, x16, [x0] 1358 7dc: f8241175 ldclr x4, x21, [x11] 1359 7e0: f83e22d0 ldeor x30, x16, [x22] 1360 7e4: f82432ef ldset x4, x15, [x23] 1361 7e8: f83a5186 ldsmin x26, x6, [x12] 1362 7ec: f82f41ee ldsmax x15, x14, [x15] 1363 7f0: f82973b9 ldumin x9, x25, [x29] 1364 7f4: f82b6194 ldumax x11, x20, [x12] 1365 7f8: f8b28216 swpa x18, x22, [x16] 1366 7fc: f8b50358 ldadda x21, x24, [x26] 1367 800: f8a61206 ldclra x6, x6, [x16] 1368 804: f8b02219 ldeora x16, x25, [x16] 1369 808: f8bc3218 
ldseta x28, x24, [x16] 1370 80c: f8ba514f ldsmina x26, x15, [x10] 1371 810: f8ad428e ldsmaxa x13, x14, [x20] 1372 814: f8a173d7 ldumina x1, x23, [x30] 1373 818: f8ae60c2 ldumaxa x14, x2, [x6] 1374 81c: f8e38328 swpal x3, x8, [x25] 1375 820: f8e003db ldaddal x0, x27, [x30] 1376 824: f8e513c5 ldclral x5, x5, [x30] 1377 828: f8eb2019 ldeoral x11, x25, [x0] 1378 82c: f8ff3260 ldsetal xzr, x0, [x19] 1379 830: f8fd513a ldsminal x29, x26, [x9] 1380 834: f8fa41ec ldsmaxal x26, x12, [x15] 1381 838: f8eb724b lduminal x11, x11, [x18] 1382 83c: f8f96316 ldumaxal x25, x22, [x24] 1383 840: f8608171 swpl x0, x17, [x11] 1384 844: f86600dd ldaddl x6, x29, [x6] 1385 848: f86512a5 ldclrl x5, x5, [x21] 1386 84c: f8732250 ldeorl x19, x16, [x18] 1387 850: f87e339b ldsetl x30, x27, [x28] 1388 854: f861503c ldsminl x1, x28, [x1] 1389 858: f874421d ldsmaxl x20, x29, [x16] 1390 85c: f86d73aa lduminl x13, x10, [x29] 1391 860: f87d62d3 ldumaxl x29, x19, [x22] 1392 864: b82a83e4 swp w10, w4, [sp] 1393 868: b83503e8 ldadd w21, w8, [sp] 1394 86c: b833138a ldclr w19, w10, [x28] 1395 870: b82220b9 ldeor w2, w25, [x5] 1396 874: b82332c8 ldset w3, w8, [x22] 1397 878: b83350ad ldsmin w19, w13, [x5] 1398 87c: b83d42b8 ldsmax w29, w24, [x21] 1399 880: b83a7078 ldumin w26, w24, [x3] 1400 884: b83862fa ldumax w24, w26, [x23] 1401 888: b8af8075 swpa w15, w21, [x3] 1402 88c: b8b80328 ldadda w24, w8, [x25] 1403 890: b8b41230 ldclra w20, w16, [x17] 1404 894: b8a22001 ldeora w2, w1, [x0] 1405 898: b8b83064 ldseta w24, w4, [x3] 1406 89c: b8ac539f ldsmina w12, wzr, [x28] 1407 8a0: b8aa405a ldsmaxa w10, w26, [x2] 1408 8a4: b8ac73f2 ldumina w12, w18, [sp] 1409 8a8: b8a163ad ldumaxa w1, w13, [x29] 1410 8ac: b8e08193 swpal w0, w19, [x12] 1411 8b0: b8f101b6 ldaddal w17, w22, [x13] 1412 8b4: b8fc13fe ldclral w28, w30, [sp] 1413 8b8: b8e1239a ldeoral w1, w26, [x28] 1414 8bc: b8e4309e ldsetal w4, w30, [x4] 1415 8c0: b8e6535e ldsminal w6, w30, [x26] 1416 8c4: b8f24109 ldsmaxal w18, w9, [x8] 1417 8c8: b8ec7280 lduminal 
w12, w0, [x20] 1418 8cc: b8e16058 ldumaxal w1, w24, [x2] 1419 8d0: b8608309 swpl w0, w9, [x24] 1420 8d4: b87a03d0 ldaddl w26, w16, [x30] 1421 8d8: b86312ea ldclrl w3, w10, [x23] 1422 8dc: b86a2244 ldeorl w10, w4, [x18] 1423 8e0: b862310b ldsetl w2, w11, [x8] 1424 8e4: b86a522f ldsminl w10, w15, [x17] 1425 8e8: b862418a ldsmaxl w2, w10, [x12] 1426 8ec: b86c71af lduminl w12, w15, [x13] 1427 8f0: b8626287 ldumaxl w2, w7, [x20] 1428 8f4: 042401f9 add z25.b, z15.b, z4.b 1429 8f8: 04b10564 sub z4.s, z11.s, z17.s 1430 8fc: 65ca0230 fadd z16.d, z17.d, z10.d 1431 900: 65d90996 fmul z22.d, z12.d, z25.d 1432 904: 65ca05dc fsub z28.d, z14.d, z10.d 1433 908: 0456afc1 abs z1.h, p3/m, z30.h 1434 90c: 0400044f add z15.b, p1/m, z15.b, z2.b 1435 910: 0490920d asr z13.s, p4/m, z13.s, z16.s 1436 914: 04daa163 cnt z3.d, p0/m, z11.d 1437 918: 04d389c5 lsl z5.d, p2/m, z5.d, z14.d 1438 91c: 0411829d lsr z29.b, p0/m, z29.b, z20.b 1439 920: 04901774 mul z20.s, p5/m, z20.s, z27.s 1440 924: 0417b89a neg z26.b, p6/m, z4.b 1441 928: 041eb3d6 not z22.b, p4/m, z30.b 1442 92c: 04480b6b smax z11.h, p2/m, z11.h, z27.h 1443 930: 048a17dc smin z28.s, p5/m, z28.s, z30.s 1444 934: 048105be sub z30.s, p1/m, z30.s, z13.s 1445 938: 04dcb35e fabs z30.d, p4/m, z26.d 1446 93c: 65808d6f fadd z15.s, p3/m, z15.s, z11.s 1447 940: 65cd9e06 fdiv z6.d, p7/m, z6.d, z16.d 1448 944: 65869cfb fmax z27.s, p7/m, z27.s, z7.s 1449 948: 65c78893 fmin z19.d, p2/m, z19.d, z4.d 1450 94c: 658292d1 fmul z17.s, p4/m, z17.s, z22.s 1451 950: 04ddaebc fneg z28.d, p3/m, z21.d 1452 954: 6582b452 frintm z18.s, p5/m, z2.s 1453 958: 6580ade6 frintn z6.s, p3/m, z15.s 1454 95c: 65c1b42c frintp z12.d, p5/m, z1.d 1455 960: 658da632 fsqrt z18.s, p1/m, z17.s 1456 964: 658195af fsub z15.s, p5/m, z15.s, z13.s 1457 968: 65eb1f74 fmla z20.d, p7/m, z27.d, z11.d 1458 96c: 65f723c3 fmls z3.d, p0/m, z30.d, z23.d 1459 970: 65ba4b71 fnmla z17.s, p2/m, z27.s, z26.s 1460 974: 65fe76c6 fnmls z6.d, p5/m, z22.d, z30.d 1461 978: 04525f42 mla z2.h, p7/m, z26.h, 
z18.h 1462 97c: 04117056 mls z22.b, p4/m, z2.b, z17.b 1463 980: 04363338 and z24.d, z25.d, z22.d 1464 984: 04a33192 eor z18.d, z12.d, z3.d 1465 988: 0470339d orr z29.d, z28.d, z16.d 1466 98c: 049a2b86 andv s6, p2, z28.s 1467 990: 045824e7 orv h7, p1, z7.h 1468 994: 04193509 eorv b9, p5, z8.b 1469 998: 040837db smaxv b27, p5, z30.b 1470 99c: 044a221a sminv h26, p0, z16.h 1471 9a0: 65c73903 fminv d3, p6, z8.d 1472 9a4: 65c63b55 fmaxv d21, p6, z26.d 1473 9a8: 65982096 fadda s22, p0, s22, z4.s 1474 9ac: 04412071 uaddv d17, p0, z3.h 1475 */ 1476 1477 static const unsigned int insns[] = 1478 { 1479 0x8b0d82fa, 0xcb49970c, 0xab889dfc, 0xeb9ee787, 1480 0x0b9b3ec9, 0x4b9279a3, 0x2b88474e, 0x6b8c56c0, 1481 0x8a1a51e0, 0xaa11f4ba, 0xca0281b8, 0xea918c7c, 1482 0x0a5d4a19, 0x2a4b264d, 0x4a523ca5, 0x6a9b6ae2, 1483 0x8a70b79b, 0xaaba9728, 0xca6dfe3d, 0xea627f1c, 1484 0x0aa70f53, 0x2aaa0f06, 0x4a6176a4, 0x6a604eb0, 1485 0x1105ed91, 0x3100583e, 0x5101f8bd, 0x710f0306, 1486 0x9101a1a0, 0xb10a5cc8, 0xd10810aa, 0xf10fd061, 1487 0x120cb166, 0x321764bc, 0x52174681, 0x720c0247, 1488 0x9241018e, 0xb25a2969, 0xd278b411, 0xf26aad01, 1489 0x14000000, 0x17ffffd7, 0x14000242, 0x94000000, 1490 0x97ffffd4, 0x9400023f, 0x3400000a, 0x34fffa2a, 1491 0x3400478a, 0x35000008, 0x35fff9c8, 0x35004728, 1492 0xb400000b, 0xb4fff96b, 0xb40046cb, 0xb500001d, 1493 0xb5fff91d, 0xb500467d, 0x10000013, 0x10fff8b3, 1494 0x10004613, 0x90000013, 0x36300016, 0x3637f836, 1495 0x36304596, 0x3758000c, 0x375ff7cc, 0x3758452c, 1496 0x128313a0, 0x528a32c7, 0x7289173b, 0x92ab3acc, 1497 0xd2a0bf94, 0xf2c285e8, 0x9358722f, 0x330e652f, 1498 0x53067f3b, 0x93577c53, 0xb34a1aac, 0xd35a4016, 1499 0x13946c63, 0x93c3dbc8, 0x54000000, 0x54fff5a0, 1500 0x54004300, 0x54000001, 0x54fff541, 0x540042a1, 1501 0x54000002, 0x54fff4e2, 0x54004242, 0x54000002, 1502 0x54fff482, 0x540041e2, 0x54000003, 0x54fff423, 1503 0x54004183, 0x54000003, 0x54fff3c3, 0x54004123, 1504 0x54000004, 0x54fff364, 0x540040c4, 0x54000005, 1505 0x54fff305, 
0x54004065, 0x54000006, 0x54fff2a6, 1506 0x54004006, 0x54000007, 0x54fff247, 0x54003fa7, 1507 0x54000008, 0x54fff1e8, 0x54003f48, 0x54000009, 1508 0x54fff189, 0x54003ee9, 0x5400000a, 0x54fff12a, 1509 0x54003e8a, 0x5400000b, 0x54fff0cb, 0x54003e2b, 1510 0x5400000c, 0x54fff06c, 0x54003dcc, 0x5400000d, 1511 0x54fff00d, 0x54003d6d, 0x5400000e, 0x54ffefae, 1512 0x54003d0e, 0x5400000f, 0x54ffef4f, 0x54003caf, 1513 0xd40658e1, 0xd4014d22, 0xd4046543, 0xd4273f60, 1514 0xd44cad80, 0xd503201f, 0xd69f03e0, 0xd6bf03e0, 1515 0xd5033fdf, 0xd5033e9f, 0xd50332bf, 0xd61f0200, 1516 0xd63f0280, 0xc80a7d1b, 0xc800fea1, 0xc85f7fb1, 1517 0xc85fff9d, 0xc89ffee1, 0xc8dffe95, 0x88167e7b, 1518 0x880bfcd0, 0x885f7c12, 0x885ffd44, 0x889ffed8, 1519 0x88dffe6a, 0x48017fc5, 0x4808fe2c, 0x485f7dc9, 1520 0x485ffc27, 0x489ffe05, 0x48dffd82, 0x080a7c6c, 1521 0x081cff4e, 0x085f7d5e, 0x085ffeae, 0x089ffd2d, 1522 0x08dfff76, 0xc87f4d7c, 0xc87fcc5e, 0xc8220417, 1523 0xc82cb5f0, 0x887f55b2, 0x887ff90b, 0x88382c2d, 1524 0x883aedb5, 0xf819928b, 0xb803e21c, 0x381f713b, 1525 0x781ce322, 0xf850f044, 0xb85e129e, 0x385e92f2, 1526 0x785ff35d, 0x39801921, 0x7881318b, 0x78dce02b, 1527 0xb8829313, 0xfc45f318, 0xbc5d50af, 0xfc001375, 1528 0xbc1951b7, 0xf8008c0b, 0xb801dc03, 0x38009dcb, 1529 0x781fdf1d, 0xf8570e2d, 0xb85faecc, 0x385f6d8d, 1530 0x785ebea0, 0x38804cf7, 0x789cbce3, 0x78df9cbc, 1531 0xb89eed38, 0xfc40cd6e, 0xbc5bdd93, 0xfc103c14, 1532 0xbc040c08, 0xf81a2784, 0xb81ca4ec, 0x381e855b, 1533 0x7801b506, 0xf853654e, 0xb85d74b0, 0x384095c2, 1534 0x785ec5bc, 0x389e15a9, 0x789dc703, 0x78c06474, 1535 0xb89ff667, 0xfc57e51e, 0xbc4155f9, 0xfc05a6ee, 1536 0xbc1df408, 0xf835da4a, 0xb836d9a4, 0x3833580d, 1537 0x7826cb6c, 0xf8706900, 0xb87ae880, 0x3865db2e, 1538 0x78724889, 0x38a7789b, 0x78beca2f, 0x78f6c810, 1539 0xb8bef956, 0xfc6afabd, 0xbc734963, 0xfc3d5b8d, 1540 0xbc25fbb7, 0xf9189d05, 0xb91ecb1d, 0x39187a33, 1541 0x791f226d, 0xf95aa2f3, 0xb9587bb7, 0x395f7176, 1542 0x795d9143, 0x399e7e08, 0x799a2697, 0x79df3422, 
1543 0xb99c2624, 0xfd5c2374, 0xbd5fa1d9, 0xfd1d595a, 1544 0xbd1b1869, 0x58002cfb, 0x1800000b, 0xf8945060, 1545 0xd8000000, 0xf8ae6ba0, 0xf99a0080, 0x1a070035, 1546 0x3a0700a8, 0x5a0e0367, 0x7a11009b, 0x9a000380, 1547 0xba1e030c, 0xda0f0320, 0xfa030301, 0x0b340b12, 1548 0x2b2a278d, 0xcb22aa0f, 0x6b2d29bd, 0x8b2cce8c, 1549 0xab2b877e, 0xcb21c8ee, 0xeb3ba47d, 0x3a4d400e, 1550 0x7a5232c6, 0xba5e624e, 0xfa53814c, 0x3a52d8c2, 1551 0x7a4d8924, 0xba4b3aab, 0xfa4d7882, 0x1a96804c, 1552 0x1a912618, 0x5a90b0e6, 0x5a96976b, 0x9a9db06a, 1553 0x9a9b374c, 0xda95c14f, 0xda89c6fe, 0x5ac0015e, 1554 0x5ac005fd, 0x5ac00bdd, 0x5ac012b9, 0x5ac01404, 1555 0xdac002b2, 0xdac0061d, 0xdac00a95, 0xdac00e66, 1556 0xdac0107e, 0xdac01675, 0x1ac00b0b, 0x1ace0f3b, 1557 0x1ad221c3, 0x1ad825e7, 0x1ad92a3c, 0x1adc2f42, 1558 0x9ada0b25, 0x9ad20e1b, 0x9acc22a6, 0x9acc2480, 1559 0x9adc2a3b, 0x9ad22c5c, 0x9bce7dea, 0x9b597c6e, 1560 0x1b0e166f, 0x1b1ae490, 0x9b023044, 0x9b089e3d, 1561 0x9b391083, 0x9b24c73a, 0x9bb15f40, 0x9bbcc6af, 1562 0x1e23095b, 0x1e3918e0, 0x1e2f28c9, 0x1e2a39fd, 1563 0x1e270a22, 0x1e77096b, 0x1e771ba7, 0x1e6b2b6e, 1564 0x1e78388b, 0x1e6e09ec, 0x1f1c3574, 0x1f17f98b, 1565 0x1f2935da, 0x1f2574ea, 0x1f4b306f, 0x1f5ec7cf, 1566 0x1f6f3e93, 0x1f6226a9, 0x1e2040fb, 0x1e20c3dd, 1567 0x1e214031, 0x1e21c0c2, 0x1e22c06a, 0x1e604178, 1568 0x1e60c027, 0x1e61400b, 0x1e61c243, 0x1e6240dc, 1569 0x1e3800d6, 0x9e380360, 0x1e78005a, 0x9e7800e5, 1570 0x1e22017c, 0x9e2201b9, 0x1e6202eb, 0x9e620113, 1571 0x1e2602b2, 0x9e660299, 0x1e270253, 0x9e6703a2, 1572 0x1e2822c0, 0x1e7322a0, 0x1e202288, 0x1e602168, 1573 0x293c19f4, 0x2966387b, 0x69762971, 0xa9041dc7, 1574 0xa9475c0c, 0x29b61ccd, 0x29ee405e, 0x69ee0744, 1575 0xa9843977, 0xa9f46ebd, 0x28ba16b6, 0x28fc44db, 1576 0x68f61831, 0xa8b352ad, 0xa8c56d5e, 0x28024565, 1577 0x2874134e, 0xa8027597, 0xa87b1aa0, 0x0c40734f, 1578 0x4cdfa177, 0x0cc76ee8, 0x4cdf2733, 0x0d40c23d, 1579 0x4ddfcaf8, 0x0dd9ccaa, 0x4c408d52, 0x0cdf85ec, 1580 0x4d60c259, 0x0dffcbc1, 
0x4de9ce50, 0x4cc24999, 1581 0x0c404a7a, 0x4d40e6af, 0x4ddfe9b9, 0x0dddef8e, 1582 0x4cdf07b1, 0x0cc000fb, 0x0d60e258, 0x0dffe740, 1583 0x0de2eb2c, 0xce648376, 0xce6184c7, 0xcec081fa, 1584 0xce6d89a2, 0xba5fd3e3, 0x3a5f03e5, 0xfa411be4, 1585 0x7a42cbe2, 0x93df03ff, 0xc820ffff, 0x8822fc7f, 1586 0xc8247cbf, 0x88267fff, 0x4e010fe0, 0x4e081fe1, 1587 0x4e0c1fe1, 0x4e0a1fe1, 0x4e071fe1, 0x4cc0ac3f, 1588 0x05a08020, 0x04b0e3e0, 0x0470e7e1, 0x042f9c20, 1589 0x043f9c35, 0x047f9c20, 0x04ff9c20, 0x04299420, 1590 0x04319160, 0x0461943e, 0x04a19020, 0x042053ff, 1591 0x047f5401, 0x25208028, 0x2538cfe0, 0x2578d001, 1592 0x25b8efe2, 0x25f8f007, 0xa400a3e0, 0xa4a8a7ea, 1593 0xa547a814, 0xa4084ffe, 0xa55c53e0, 0xa5e1540b, 1594 0xe400fbf6, 0xe408ffff, 0xe547e400, 0xe4014be0, 1595 0xe4a84fe0, 0xe5f25000, 0x858043e0, 0x85a043ff, 1596 0xe59f5d08, 0x1e601000, 0x1e603000, 0x1e621000, 1597 0x1e623000, 0x1e641000, 0x1e643000, 0x1e661000, 1598 0x1e663000, 0x1e681000, 0x1e683000, 0x1e6a1000, 1599 0x1e6a3000, 0x1e6c1000, 0x1e6c3000, 0x1e6e1000, 1600 0x1e6e3000, 0x1e701000, 0x1e703000, 0x1e721000, 1601 0x1e723000, 0x1e741000, 0x1e743000, 0x1e761000, 1602 0x1e763000, 0x1e781000, 0x1e783000, 0x1e7a1000, 1603 0x1e7a3000, 0x1e7c1000, 0x1e7c3000, 0x1e7e1000, 1604 0x1e7e3000, 0xf8388098, 0xf8340010, 0xf8241175, 1605 0xf83e22d0, 0xf82432ef, 0xf83a5186, 0xf82f41ee, 1606 0xf82973b9, 0xf82b6194, 0xf8b28216, 0xf8b50358, 1607 0xf8a61206, 0xf8b02219, 0xf8bc3218, 0xf8ba514f, 1608 0xf8ad428e, 0xf8a173d7, 0xf8ae60c2, 0xf8e38328, 1609 0xf8e003db, 0xf8e513c5, 0xf8eb2019, 0xf8ff3260, 1610 0xf8fd513a, 0xf8fa41ec, 0xf8eb724b, 0xf8f96316, 1611 0xf8608171, 0xf86600dd, 0xf86512a5, 0xf8732250, 1612 0xf87e339b, 0xf861503c, 0xf874421d, 0xf86d73aa, 1613 0xf87d62d3, 0xb82a83e4, 0xb83503e8, 0xb833138a, 1614 0xb82220b9, 0xb82332c8, 0xb83350ad, 0xb83d42b8, 1615 0xb83a7078, 0xb83862fa, 0xb8af8075, 0xb8b80328, 1616 0xb8b41230, 0xb8a22001, 0xb8b83064, 0xb8ac539f, 1617 0xb8aa405a, 0xb8ac73f2, 0xb8a163ad, 0xb8e08193, 1618 
0xb8f101b6, 0xb8fc13fe, 0xb8e1239a, 0xb8e4309e, 1619 0xb8e6535e, 0xb8f24109, 0xb8ec7280, 0xb8e16058, 1620 0xb8608309, 0xb87a03d0, 0xb86312ea, 0xb86a2244, 1621 0xb862310b, 0xb86a522f, 0xb862418a, 0xb86c71af, 1622 0xb8626287, 0x042401f9, 0x04b10564, 0x65ca0230, 1623 0x65d90996, 0x65ca05dc, 0x0456afc1, 0x0400044f, 1624 0x0490920d, 0x04daa163, 0x04d389c5, 0x0411829d, 1625 0x04901774, 0x0417b89a, 0x041eb3d6, 0x04480b6b, 1626 0x048a17dc, 0x048105be, 0x04dcb35e, 0x65808d6f, 1627 0x65cd9e06, 0x65869cfb, 0x65c78893, 0x658292d1, 1628 0x04ddaebc, 0x6582b452, 0x6580ade6, 0x65c1b42c, 1629 0x658da632, 0x658195af, 0x65eb1f74, 0x65f723c3, 1630 0x65ba4b71, 0x65fe76c6, 0x04525f42, 0x04117056, 1631 0x04363338, 0x04a33192, 0x0470339d, 0x049a2b86, 1632 0x045824e7, 0x04193509, 0x040837db, 0x044a221a, 1633 0x65c73903, 0x65c63b55, 0x65982096, 0x04412071, 1634 1635 }; 1636 // END Generated code -- do not edit 1637 1638 asm_check((unsigned int *)entry, insns, sizeof insns / sizeof insns[0]); 1639 1640 { 1641 address PC = __ pc(); 1642 __ ld1(v0, __ T16B, Address(r16)); // No offset 1643 __ ld1(v0, __ T8H, __ post(r16, 16)); // Post-index 1644 __ ld2(v0, v1, __ T8H, __ post(r24, 16 * 2)); // Post-index 1645 __ ld1(v0, __ T16B, __ post(r16, r17)); // Register post-index 1646 static const unsigned int vector_insns[] = { 1647 0x4c407200, // ld1 {v0.16b}, [x16] 1648 0x4cdf7600, // ld1 {v0.8h}, [x16], #16 1649 0x4cdf8700, // ld2 {v0.8h, v1.8h}, [x24], #32 1650 0x4cd17200, // ld1 {v0.16b}, [x16], x17 1651 }; 1652 asm_check((unsigned int *)PC, vector_insns, 1653 sizeof vector_insns / sizeof vector_insns[0]); 1654 } 1655 } 1656 #endif // ASSERT 1657 1658 #undef __ 1659 1660 void Assembler::emit_data64(jlong data, 1661 relocInfo::relocType rtype, 1662 int format) { 1663 if (rtype == relocInfo::none) { 1664 emit_int64(data); 1665 } else { 1666 emit_data64(data, Relocation::spec_simple(rtype), format); 1667 } 1668 } 1669 1670 void Assembler::emit_data64(jlong data, 1671 RelocationHolder const& rspec, 
1672 int format) { 1673 1674 assert(inst_mark() != NULL, "must be inside InstructionMark"); 1675 // Do not use AbstractAssembler::relocate, which is not intended for 1676 // embedded words. Instead, relocate to the enclosing instruction. 1677 code_section()->relocate(inst_mark(), rspec, format); 1678 emit_int64(data); 1679 } 1680 1681 extern "C" { 1682 void das(uint64_t start, int len) { 1683 ResourceMark rm; 1684 len <<= 2; 1685 if (len < 0) 1686 Disassembler::decode((address)start + len, (address)start); 1687 else 1688 Disassembler::decode((address)start, (address)start + len); 1689 } 1690 1691 JNIEXPORT void das1(uintptr_t insn) { 1692 das(insn, 1); 1693 } 1694 } 1695 1696 #define gas_assert(ARG1) assert(ARG1, #ARG1) 1697 1698 #define __ as-> 1699 1700 void Address::lea(MacroAssembler *as, Register r) const { 1701 Relocation* reloc = _rspec.reloc(); 1702 relocInfo::relocType rtype = (relocInfo::relocType) reloc->type(); 1703 1704 switch(_mode) { 1705 case base_plus_offset: { 1706 if (_offset == 0 && _base == r) // it's a nop 1707 break; 1708 if (_offset > 0) 1709 __ add(r, _base, _offset); 1710 else 1711 __ sub(r, _base, -_offset); 1712 break; 1713 } 1714 case base_plus_offset_reg: { 1715 __ add(r, _base, _index, _ext.op(), MAX2(_ext.shift(), 0)); 1716 break; 1717 } 1718 case literal: { 1719 if (rtype == relocInfo::none) 1720 __ mov(r, target()); 1721 else 1722 __ movptr(r, (uint64_t)target()); 1723 break; 1724 } 1725 default: 1726 ShouldNotReachHere(); 1727 } 1728 } 1729 1730 void Assembler::adrp(Register reg1, const Address &dest, uintptr_t &byte_offset) { 1731 ShouldNotReachHere(); 1732 } 1733 1734 #undef __ 1735 1736 #define starti Instruction_aarch64 do_not_use(this); set_current(&do_not_use) 1737 1738 void Assembler::adr(Register Rd, address adr) { 1739 intptr_t offset = adr - pc(); 1740 int offset_lo = offset & 3; 1741 offset >>= 2; 1742 starti; 1743 f(0, 31), f(offset_lo, 30, 29), f(0b10000, 28, 24), sf(offset, 23, 5); 1744 rf(Rd, 0); 1745 } 1746 1747 
// ADRP: compute the 4 KB page address of adr, PC-relative, into Rd.
// Both the current PC and the target are reduced to page numbers
// (>> 12); the signed page delta is split into immlo (2 bits, insn
// bits 30:29) and immhi (19 bits, insn bits 23:5) per the A64
// ADRP encoding.
void Assembler::_adrp(Register Rd, address adr) {
  uint64_t pc_page = (uint64_t)pc() >> 12;
  uint64_t adr_page = (uint64_t)adr >> 12;
  intptr_t offset = adr_page - pc_page;
  int offset_lo = offset & 3;   // immlo: low 2 bits of the page delta
  offset >>= 2;                 // immhi: remaining (signed) high bits
  starti;
  // Bit 31 == 1 selects the page form (ADRP) rather than ADR.
  f(1, 31), f(offset_lo, 30, 29), f(0b10000, 28, 24), sf(offset, 23, 5);
  rf(Rd, 0);
}

#undef starti

// Construct a literal Address for `target`, attaching the relocation
// spec appropriate for `rtype` so the embedded address can be found
// and patched later.
Address::Address(address target, relocInfo::relocType rtype) : _mode(literal){
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    _rspec = RelocationHolder::none;
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

// Unconditional branch to dest, recording dest's relocation at the
// branch site before emitting the instruction.
void Assembler::b(const Address &dest) {
  code_section()->relocate(pc(), dest.rspec());
  b(dest.target());
}

// Branch-with-link (call) to dest, recording dest's relocation first.
void Assembler::bl(const Address &dest) {
  code_section()->relocate(pc(), dest.rspec());
  bl(dest.target());
}

// PC-relative address of dest into r, recording dest's relocation first.
void Assembler::adr(Register r, const Address &dest) {
  code_section()->relocate(pc(), dest.rspec());
  adr(r, dest.target());
}

// Conditional branch to a label.  If L is not yet bound, register this
// site for later patching and emit a branch to the current pc as a
// placeholder.
void Assembler::br(Condition cc, Label &L) {
  if (L.is_bound()) {
    br(cc, target(L));
  } else {
    L.add_patch_at(code(), locator());
    br(cc, pc());
  }
}

// Emit an unconditional-branch-style instruction targeting label L,
// or record a patch site and emit a self-referential placeholder if L
// is unbound.  `insn` is a member-function pointer to the raw emitter.
void Assembler::wrap_label(Label &L,
                                 Assembler::uncond_branch_insn insn) {
  if (L.is_bound()) {
    (this->*insn)(target(L));
  } else {
    L.add_patch_at(code(), locator());
    (this->*insn)(pc());
  }
}

// As above, for compare-and-branch forms (CBZ/CBNZ): the tested
// register r is threaded through to the emitter.
void Assembler::wrap_label(Register r, Label &L,
                                 compare_and_branch_insn insn) {
  if (L.is_bound()) {
    (this->*insn)(r, target(L));
  } else {
    L.add_patch_at(code(), locator());
    (this->*insn)(r, pc());
  }
}

// As above, for test-bit-and-branch forms (TBZ/TBNZ): both the
// register and the tested bit position are threaded through.
void Assembler::wrap_label(Register r, int bitpos, Label &L,
                                 test_and_branch_insn insn) {
  if (L.is_bound()) {
    (this->*insn)(r, bitpos, target(L));
  } else {
    L.add_patch_at(code(), locator());
    (this->*insn)(r, bitpos, pc());
  }
}

// As above, for PC-relative prefetch forms: the prefetch operation op
// is threaded through.
void Assembler::wrap_label(Label &L, prfop op, prefetch_insn insn) {
  if (L.is_bound()) {
    (this->*insn)(target(L), op);
  } else {
    L.add_patch_at(code(), locator());
    (this->*insn)(pc(), op);
  }
}

// An "all-purpose" add/subtract immediate, per ARM documentation:
// A "programmer-friendly" assembler may accept a negative immediate
// between -(2^24 -1) and -1 inclusive, causing it to convert a
// requested ADD operation to a SUB, or vice versa, and then encode
// the absolute value of the immediate as for uimm24.
// Emit an add/subtract-immediate instruction.  A negative immediate
// flips the operation to `negated_op` and encodes the absolute value.
// Immediates that are multiples of 4096 too large for the 12-bit
// field are encoded with the shift-by-12 flag set.
void Assembler::add_sub_immediate(Register Rd, Register Rn, unsigned uimm, int op,
                                  int negated_op) {
  bool sets_flags = op & 1;           // this op sets flags
  // Reinterpret uimm's bits as signed via union type punning so the
  // sign test and negation below operate on the caller's intent.
  union {
    unsigned u;
    int imm;
  };
  u = uimm;
  bool shift = false;
  bool neg = imm < 0;
  if (neg) {
    imm = -imm;
    op = negated_op;                  // ADD <-> SUB when imm was negative
  }
  assert(Rd != sp || imm % 16 == 0, "misaligned stack");
  // Multiples of 4096 that don't fit the raw 12-bit field use the
  // LSL #12 encoding (shift bit set, imm reduced to units of 4096).
  if (imm >= (1 << 11)
      && ((imm >> 12) << 12 == imm)) {
    imm >>= 12;
    shift = true;
  }
  f(op, 31, 29), f(0b10001, 28, 24), f(shift, 23, 22), f(imm, 21, 10);

  // add/subtract immediate ops with the S bit set treat r31 as zr;
  // with S unset they use sp.
  if (sets_flags)
    zrf(Rd, 0);
  else
    srf(Rd, 0);

  srf(Rn, 5);
}

// True iff |imm| can be encoded by add_sub_immediate: either a plain
// 12-bit value or a 12-bit value shifted left by 12.
bool Assembler::operand_valid_for_add_sub_immediate(int64_t imm) {
  bool shift = false;
  uint64_t uimm = (uint64_t)uabs(imm);
  if (uimm < (1 << 12))
    return true;
  if (uimm < (1 << 24)
      && ((uimm >> 12) << 12 == uimm)) {
    return true;
  }
  return false;
}

// True iff imm is expressible as an A64 bitmask (logical) immediate;
// encode_logical_immediate returns the sentinel 0xffffffff on failure.
bool Assembler::operand_valid_for_logical_immediate(bool is32, uint64_t imm) {
  return encode_logical_immediate(is32, imm) != 0xffffffff;
}

// Return the raw 64-bit pattern of a double (union type punning).
static uint64_t doubleTo64Bits(jdouble d) {
  union {
    jdouble double_value;
    uint64_t double_bits;
  };

  double_value = d;
  return double_bits;
}

// True iff imm can be materialized as an FMOV (immediate) operand:
// either all-zero bits (loadable from ZR) or a value that survives an
// encode/decode round trip through the 8-bit FP immediate format.
bool Assembler::operand_valid_for_float_immediate(double imm) {
  // If imm is all zero bits we can use ZR as the source of a
  // floating-point value.
  if (doubleTo64Bits(imm) == 0)
    return true;

  // Otherwise try to encode imm then convert the encoded value back
  // and make sure it's the exact same bit pattern.
  unsigned result = encoding_for_fp_immediate(imm);
  return doubleTo64Bits(imm) == fp_immediate_for_encoding(result, true);
}

// Byte used to fill gaps in the code buffer.
int AbstractAssembler::code_fill_byte() {
  return 0;
}

// n.b. this is implemented in subclass MacroAssembler
void Assembler::bang_stack_with_offset(int offset) { Unimplemented(); }


// and now the routines called by the assembler which encapsulate the
// above encode and decode functions

// Encode imm as an A64 bitmask (logical) immediate; returns the
// sentinel 0xffffffff if imm is not encodable.
uint32_t
asm_util::encode_logical_immediate(bool is32, uint64_t imm)
{
  if (is32) {
    /* Allow all zeros or all ones in top 32-bits, so that
       constant expressions like ~1 are permitted. */
    if (imm >> 32 != 0 && imm >> 32 != 0xffffffff)
      return 0xffffffff;
    /* Replicate the 32 lower bits to the 32 upper bits. */
    imm &= 0xffffffff;
    imm |= imm << 32;
  }

  return encoding_for_logical_immediate(imm);
}

// Encode value as an 8-bit FP-move immediate; guarantees the encoding
// round-trips exactly back to value (i.e. value was representable).
unsigned Assembler::pack(double value) {
  float val = (float)value;
  unsigned result = encoding_for_fp_immediate(val);
  guarantee(unpack(result) == value,
            "Invalid floating-point immediate operand");
  return result;
}

// Packed operands for Floating-point Move (immediate)

// Decode an 8-bit FP-move immediate encoding back to the float it
// represents (bit pattern recovered via union type punning).
static float unpack(unsigned value) {
  union {
    unsigned ival;
    float val;
  };
  ival = fp_immediate_for_encoding(value, 0);
  return val;
}