1 /*
   2  * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights All rights reserved.
   3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2015, Linaro Ltd. All rights reserved.
   5  * Copyright (c) 2015-2018, Azul Systems, Inc. All rights reserved.
   6  * reserved.  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE
   7  * HEADER.
   8  *
   9  * This code is free software; you can redistribute it and/or modify it
  10  * under the terms of the GNU General Public License version 2 only, as
  11  * published by the Free Software Foundation.
  12  *
  13  * This code is distributed in the hope that it will be useful, but WITHOUT
  14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  16  * version 2 for more details (a copy is included in the LICENSE file that
  17  * accompanied this code).
  18  *
  19  * You should have received a copy of the GNU General Public License version
  20  * 2 along with this work; if not, write to the Free Software Foundation,
  21  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  22  *
  23  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  24  * or visit www.oracle.com if you need additional information or have any
  25  * questions.
  26  *
  27  */
  28 
  29 #include <stdio.h>
  30 #include <sys/types.h>
  31 
  32 #include "precompiled.hpp"
  33 #include "asm/assembler.hpp"
  34 #include "asm/assembler.inline.hpp"
  35 #include "compiler/disassembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "runtime/interfaceSupport.inline.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "register_aarch32.hpp"
  41 #include "vm_version_aarch32.hpp"
  42 
  43 extern "C" void entry(CodeBuffer *cb);
  44 
  45 #define __ _masm.
  46 #ifdef PRODUCT
  47 #define BLOCK_COMMENT(str) /* nothing */
  48 #else
  49 #define BLOCK_COMMENT(str) block_comment(str)
  50 #endif
  51 
  52 #define BIND(label) bind(label); __ BLOCK_COMMENT(#label ":")
  53 
  54 void entry(CodeBuffer *cb) {
  55 
  56   // {
  57   //   for (int i = 0; i < 256; i+=16)
  58   //     {
  59   //    printf("\"%20.20g\", ", unpack(i));
  60   //    printf("\"%20.20g\", ", unpack(i+1));
  61   //     }
  62   //   printf("\n");
  63   // }
  64 
  65 #if defined(ASSERT) && !defined (__SOFTFP__)
  66   Assembler _masm(cb);
  67   address entry = __ pc();
  68 
  69   // Smoke test for assembler
  70 
  71   // we're checking the code generation, not applicability of the code to the actual target
  72   // so temporarily override the detected cpu to allow emission of all instructions
  73   const ProcessorFeatures detected_features = VM_Version::features();
  74   VM_Version::features(FT_ALL);
  75 
  76 // BEGIN  Generated code -- do not edit
  77 // Generated by aarch32-asmtest.py
  78     Label back, forth, near, near_post, near_flt, near_post_flt;
  79     __ bind(back);
  80 
  81 // ThreeRegSft
  82     __ add(r8, r2, r11, ::lsr(10));                    //   add r8, r2, r11, lsr #10
  83     __ adds(r1, r3, r7, ::asr(1), Assembler::EQ);      //   addEQs r1, r3, r7, asr #1
  84     __ eor(r0, r9, r4, ::lsl(5));                      //   eor r0, r9, r4, lsl #5
  85     __ eors(r9, r2, r6, ::rrx(), Assembler::GT);       //   eorGTs r9, r2, r6, rrx
  86     __ sub(r0, r12, lr, ::lsr(0), Assembler::GT);      //   subGT r0, r12, lr, lsr #0
  87     __ subs(r8, r2, r4, ::ror(6), Assembler::EQ);      //   subEQs r8, r2, r4, ror #6
  88     __ rsb(r8, r9, sp, ::lsl(3));                      //   rsb r8, r9, sp, lsl #3
  89     __ rsbs(r8, r0, r4, ::ror(16), Assembler::VS);     //   rsbVSs r8, r0, r4, ror #16
  90     __ add(r9, r5, r1, ::lsr(15), Assembler::LE);      //   addLE r9, r5, r1, lsr #15
  91     __ adds(r1, sp, r6, ::asr(5));                     //   adds r1, sp, r6, asr #5
  92     __ adc(r11, sp, r7, ::asr(1), Assembler::GT);      //   adcGT r11, sp, r7, asr #1
  93     __ adcs(r0, r8, r9, ::lsr(6));                     //   adcs r0, r8, r9, lsr #6
  94     __ sbc(r9, r3, r6, ::ror(5));                      //   sbc r9, r3, r6, ror #5
  95     __ sbcs(r1, sp, r5, ::asr(16), Assembler::HI);     //   sbcHIs r1, sp, r5, asr #16
  96     __ rsc(r8, r2, r6, ::lsl(9), Assembler::CC);       //   rscCC r8, r2, r6, lsl #9
  97     __ rscs(r10, r4, sp, ::ror(14));                   //   rscs r10, r4, sp, ror #14
  98     __ orr(r11, sp, r5, ::lsl(15), Assembler::NE);     //   orrNE r11, sp, r5, lsl #15
  99     __ orrs(r9, r10, r4, ::ror(14));                   //   orrs r9, r10, r4, ror #14
 100     __ bic(r9, sp, r5, ::ror(1));                      //   bic r9, sp, r5, ror #1
 101     __ bics(r0, r2, r7, ::asr(10));                    //   bics r0, r2, r7, asr #10
 102 
 103 // ThreeRegRSR
 104     __ add(sp, r6, r7, ::ror(r7));                     //   add sp, r6, r7, ror r7
 105     __ adds(r4, r12, r6, ::ror(r7), Assembler::HI);    //   addHIs r4, r12, r6, ror r7
 106     __ eor(r5, r6, r7, ::asr(r12), Assembler::LS);     //   eorLS r5, r6, r7, asr r12
 107     __ eors(r8, r5, sp, ::lsl(r4), Assembler::AL);     //   eorALs r8, r5, sp, lsl r4
 108     __ sub(r2, r12, r5, ::asr(r0));                    //   sub r2, r12, r5, asr r0
 109     __ subs(r9, r3, r7, ::lsl(r12), Assembler::HS);    //   subHSs r9, r3, r7, lsl r12
 110     __ rsb(r9, r12, r4, ::lsl(r6), Assembler::GT);     //   rsbGT r9, r12, r4, lsl r6
 111     __ rsbs(r8, r2, r12, ::lsl(r1));                   //   rsbs r8, r2, r12, lsl r1
 112     __ add(r4, r12, sp, ::lsl(sp));                    //   add r4, r12, sp, lsl sp
 113     __ adds(r8, r11, r6, ::ror(sp));                   //   adds r8, r11, r6, ror sp
 114     __ adc(r0, r2, r5, ::lsl(r4), Assembler::NE);      //   adcNE r0, r2, r5, lsl r4
 115     __ adcs(r11, lr, r6, ::asr(r2));                   //   adcs r11, lr, r6, asr r2
 116     __ sbc(r8, r10, lr, ::asr(r3), Assembler::HI);     //   sbcHI r8, r10, lr, asr r3
 117     __ sbcs(r1, r12, r5, ::lsl(r6));                   //   sbcs r1, r12, r5, lsl r6
 118     __ rsc(r4, r5, lr, ::ror(r10), Assembler::VS);     //   rscVS r4, r5, lr, ror r10
 119     __ rscs(r1, r12, sp, ::lsl(r8));                   //   rscs r1, r12, sp, lsl r8
 120     __ orr(r8, r1, r6, ::ror(r0), Assembler::VS);      //   orrVS r8, r1, r6, ror r0
 121     __ orrs(r11, sp, r7, ::ror(r5));                   //   orrs r11, sp, r7, ror r5
 122     __ bic(r4, lr, r6, ::lsl(r2), Assembler::AL);      //   bicAL r4, lr, r6, lsl r2
 123     __ bics(r10, r11, sp, ::lsl(r3));                  //   bics r10, r11, sp, lsl r3
 124 
 125 // TwoRegImm
 126     __ add(r8, sp, (unsigned)268435462U, Assembler::HI); // addHI r8, sp, #268435462
 127     __ adds(sp, lr, (unsigned)162529280U);             //   adds sp, lr, #162529280
 128     __ eor(lr, r6, (unsigned)8192000U);                //   eor lr, r6, #8192000
 129     __ eors(r2, r3, (unsigned)292U);                   //   eors r2, r3, #292
 130     __ sub(r4, sp, (unsigned)227540992U);              //   sub r4, sp, #227540992
 131     __ subs(r1, lr, (unsigned)33554432U, Assembler::LT); // subLTs r1, lr, #33554432
 132     __ rsb(r0, r5, (unsigned)2483027968U);             //   rsb r0, r5, #2483027968
 133     __ rsbs(r8, r4, (unsigned)3080192U, Assembler::LO); //  rsbLOs r8, r4, #3080192
 134     __ add(r9, r4, (unsigned)2147483648U, Assembler::LT); //    addLT r9, r4, #2147483648
 135     __ adds(r8, r4, (unsigned)32768U, Assembler::AL);  //   addALs r8, r4, #32768
 136     __ adc(r10, lr, (unsigned)10752U, Assembler::CS);  //   adcCS r10, lr, #10752
 137     __ adcs(r10, r6, (unsigned)774144U);               //   adcs r10, r6, #774144
 138     __ sbc(r2, r12, (unsigned)637534208U);             //   sbc r2, r12, #637534208
 139     __ sbcs(r8, r10, (unsigned)692060160U);            //   sbcs r8, r10, #692060160
 140     __ rsc(sp, r6, (unsigned)7405568U);                //   rsc sp, r6, #7405568
 141     __ rscs(r10, r11, (unsigned)244318208U, Assembler::NE); //  rscNEs r10, r11, #244318208
 142     __ orr(r3, r7, (unsigned)66846720U, Assembler::VS); //  orrVS r3, r7, #66846720
 143     __ orrs(r2, r5, (unsigned)1327104U, Assembler::EQ); //  orrEQs r2, r5, #1327104
 144     __ bic(r8, r1, (unsigned)3744U, Assembler::VS);    //   bicVS r8, r1, #3744
 145     __ bics(r0, r2, (unsigned)2684354560U, Assembler::LO); //   bicLOs r0, r2, #2684354560
 146 
 147 // TwoRegSft
 148     __ tst(r8, sp, ::lsl(5));                          //   tst r8, sp, lsl #5
 149     __ teq(r6, r7, ::lsr(3));                          //   teq r6, r7, lsr #3
 150     __ cmp(r12, r4, ::ror(2));                         //   cmp r12, r4, ror #2
 151     __ cmn(r5, r7, ::lsl(16), Assembler::LT);          //   cmnLT r5, r7, lsl #16
 152 
 153 // TwoRegRSR
 154     __ tst(r2, lr, ::lsr(r7));                         //   tst r2, lr, lsr r7
 155     __ teq(r0, r2, ::ror(r5), Assembler::CC);          //   teqCC r0, r2, ror r5
 156     __ cmp(lr, r7, ::lsr(r11), Assembler::LS);         //   cmpLS lr, r7, lsr r11
 157     __ cmn(r10, r7, ::lsl(r11), Assembler::VS);        //   cmnVS r10, r7, lsl r11
 158 
 159 // OneRegImm
 160     __ tst(r2, (unsigned)557842432U);                  //   tst r2, #557842432
 161     __ teq(lr, (unsigned)7077888U, Assembler::MI);     //   teqMI lr, #7077888
 162     __ cmp(r5, (unsigned)939524096U);                  //   cmp r5, #939524096
 163     __ cmn(r7, (unsigned)2147483650U, Assembler::LO);  //   cmnLO r7, #2147483650
 164 
 165 // Shift op
 166     __ lsl(r0, r4, (unsigned)23U);                     //   lsl r0, r4, #23
 167     __ lsls(r1, r4, (unsigned)9U);                     //   lsls r1, r4, #9
 168     __ lsr(r0, r10, (unsigned)3U);                     //   lsr r0, r10, #3
 169     __ lsrs(r0, r10, (unsigned)20U);                   //   lsrs r0, r10, #20
 170     __ asr(r1, r9, (unsigned)11U);                     //   asr r1, r9, #11
 171     __ asrs(r2, r11, (unsigned)10U, Assembler::VS);    //   asrVSs r2, r11, #10
 172 
 173 // shift op
 174     __ ror(r8, r2, (unsigned)31U, Assembler::CC);      //   rorCC r8, r2, #31
 175     __ rors(r9, r12, (unsigned)8U);                    //   rors r9, r12, #8
 176 
 177 // ThreeRegNon
 178     __ ror(r8, lr, r7);                                //   ror r8, lr, r7
 179     __ rors(r12, r3, r4);                              //   rors r12, r3, r4
 180     __ lsl(r12, sp, lr, Assembler::GT);                //   lslGT r12, sp, lr
 181     __ lsls(r12, sp, r6, Assembler::AL);               //   lslALs r12, sp, r6
 182     __ lsr(r0, r1, r9, Assembler::GT);                 //   lsrGT r0, r1, r9
 183     __ lsrs(r11, r3, r12, Assembler::GT);              //   lsrGTs r11, r3, r12
 184     __ asr(r2, r12, r6, Assembler::LE);                //   asrLE r2, r12, r6
 185     __ asrs(r1, r10, r6, Assembler::LT);               //   asrLTs r1, r10, r6
 186 
 187 // TwoRegNon
 188     __ mov(r10, r3);                                   //   mov r10, r3
 189     __ movs(r0, r9);                                   //   movs r0, r9
 190 
 191 // OneRegImm
 192     __ mov_i(r3, (unsigned)656U, Assembler::VC);         // movVC r3, #656
 193     __ movs_i(r4, (unsigned)2064384U);                   // movs r4, #2064384
 194 
 195 // TwoRegSft
 196     __ mov(r12, r6, ::lsr(3));                         //   mov r12, r6, lsr #3
 197     __ movs(r5, sp, ::asr(10), Assembler::VC);         //   movVCs r5, sp, asr #10
 198 
 199 // TwoRegRSR
 200     __ mov(r1, lr, ::ror(r3));                         //   mov r1, lr, ror r3
 201     __ movs(r8, r12, ::ror(r9), Assembler::EQ);        //   movEQs r8, r12, ror r9
 202 
 203 // OneRegImm16
 204     __ movw_i(r11, (unsigned)53041U, Assembler::LO);     // movwLO r11, #53041
 205     __ movt_i(r9, (unsigned)11255U, Assembler::LO);      // movtLO r9, #11255
 206 
 207 // ThreeRegNon
 208     __ mul(r1, sp, r5, Assembler::LE);                 //   mulLE r1, sp, r5
 209     __ muls(r0, r10, r11);                             //   muls r0, r10, r11
 210 
 211 // FourRegNon
 212     __ mla(r0, r3, r12, r7);                           //   mla r0, r3, r12, r7
 213     __ mlas(r8, r11, r3, r6, Assembler::EQ);           //   mlaEQs r8, r11, r3, r6
 214     __ umull(lr, r4, r5, r6);                          //   umull lr, r4, r5, r6
 215     __ umulls(r0, r4, r6, r7);                         //   umulls r0, r4, r6, r7
 216     __ umlal(r8, r0, r11, lr);                         //   umlal r8, r0, r11, lr
 217     __ umlals(r11, r4, lr, r7);                        //   umlals r11, r4, lr, r7
 218     __ smull(r1, r5, r6, r7, Assembler::HS);           //   smullHS r1, r5, r6, r7
 219     __ smulls(r0, r11, r12, r5, Assembler::MI);        //   smullMIs r0, r11, r12, r5
 220 
 221 // FourRegNon
 222     __ umaal(r8, r9, r2, r5);                          //   umaal r8, r9, r2, r5
 223     __ mls(r0, r4, sp, lr, Assembler::EQ);             //   mlsEQ r0, r4, sp, lr
 224 
 225 // ThreeRegNon
 226     __ qadd(r9, r4, sp, Assembler::PL);                //   qaddPL r9, r4, sp
 227     __ qsub(r0, r12, r5, Assembler::MI);               //   qsubMI r0, r12, r5
 228     __ qdadd(r3, r5, r7);                              //   qdadd r3, r5, r7
 229     __ qdsub(r9, r2, r4);                              //   qdsub r9, r2, r4
 230 
 231 // FourRegNon
 232     __ smlabb(r1, r12, r5, r6);                        //   smlabb r1, r12, r5, r6
 233     __ smlabt(r0, r10, r12, r6);                       //   smlabt r0, r10, r12, r6
 234     __ smlatb(r8, r1, r3, lr);                         //   smlatb r8, r1, r3, lr
 235     __ smlatt(r1, sp, r6, r7);                         //   smlatt r1, sp, r6, r7
 236     __ smlawb(r0, r3, r4, r6);                         //   smlawb r0, r3, r4, r6
 237     __ smlawt(r11, r4, lr, r7);                        //   smlawt r11, r4, lr, r7
 238     __ smlalbb(r0, r10, r6, r7);                       //   smlalbb r0, r10, r6, r7
 239     __ smlalbt(r3, r11, r4, lr, Assembler::LS);        //   smlalbtLS r3, r11, r4, lr
 240     __ smlaltb(r8, r11, r3, r12);                      //   smlaltb r8, r11, r3, r12
 241     __ smlaltt(r8, r1, r3, r5);                        //   smlaltt r8, r1, r3, r5
 242 
 243 // ThreeRegNon
 244     __ smulwb(r2, r12, sp, Assembler::HS);             //   smulwbHS r2, r12, sp
 245     __ smulwt(r8, r12, r6);                            //   smulwt r8, r12, r6
 246     __ smulbb(r2, r6, lr, Assembler::GE);              //   smulbbGE r2, r6, lr
 247     __ smulbt(r8, r12, r7);                            //   smulbt r8, r12, r7
 248     __ smultb(r10, r3, lr, Assembler::EQ);             //   smultbEQ r10, r3, lr
 249     __ smultt(r0, r3, sp);                             //   smultt r0, r3, sp
 250 
 251 // MemoryOp
 252     __ ldr(r10, Address(r7, r9, lsl(), Address::ADD, Address::post)); //    ldr r10, [r7], r9
 253     __ ldrb(r0, Address(r9, 196));                     //   ldrb r0, [r9, #196]
 254     __ ldrh(lr, Address(r4, r6, lsl(), Address::ADD, Address::pre)); // ldrh lr, [r4, r6]!
 255     __ ldrsb(r6, Address(__ pre(r9, 232)));            //   ldrsb r6, [r9, #232]!
 256     __ ldrsh(r2, Address(r1, r1, lsl(), Address::ADD, Address::post)); //   ldrsh r2, [r1], r1
 257     __ str(r0, Address(r9, r4, lsl(), Address::ADD, Address::post)); // str r0, [r9], r4
 258     __ strb(r3, Address(__ pre(r5, 92)));              //   strb r3, [r5, #92]!
 259     __ strh(r2, Address(r8, 160));                     //   strh r2, [r8, #160]
 260 
 261 // MemoryOp
 262     __ ldr(r8, Address(r12, r8, lsl(), Address::ADD, Address::off)); // ldr r8, [r12, r8]
 263     __ ldrb(r11, Address(__ post(r10, 16)));           //   ldrb r11, [r10], #16
 264     __ ldrh(r11, Address(r10, r6, lsl(), Address::ADD, Address::off)); //   ldrh r11, [r10, r6]
 265     __ ldrsb(r5, Address(r11, r10, lsl(), Address::ADD, Address::pre)); //  ldrsb r5, [r11, r10]!
 266     __ ldrsh(r6, Address(r3, r7, lsl(), Address::ADD, Address::off)); //    ldrsh r6, [r3, r7]
 267     __ str(r7, Address(sp, r5, lsl(), Address::ADD, Address::pre)); //  str r7, [sp, r5]!
 268     __ strb(r2, Address(r10));                         //   strb r2, [r10]
 269     __ strh(r6, Address(r4, r3, lsl(), Address::ADD, Address::post)); //    strh r6, [r4], r3
 270 
 271 // MemoryOp
 272     __ ldr(r10, Address(r12));                         //   ldr r10, [r12]
 273     __ ldrb(r4, Address(__ post(r11, 132)));           //   ldrb r4, [r11], #132
 274     __ ldrh(r9, Address(r9, r12, lsl(), Address::ADD, Address::post)); //   ldrh r9, [r9], r12
 275     __ ldrsb(r9, Address(__ post(r3, 148)));           //   ldrsb r9, [r3], #148
 276     __ ldrsh(r11, Address(__ pre(r2, 148)));           //   ldrsh r11, [r2, #148]!
 277     __ str(r11, Address(sp, r11, lsl(), Address::ADD, Address::off)); //    str r11, [sp, r11]
 278     __ strb(r1, Address(sp, r10, lsl(), Address::ADD, Address::off)); //    strb r1, [sp, r10]
 279     __ strh(r10, Address(lr, r9, lsl(), Address::ADD, Address::post)); //   strh r10, [lr], r9
 280 
 281 // MemoryOp
 282     __ ldr(r6, Address(r3, r4, lsl(), Address::ADD, Address::pre)); //  ldr r6, [r3, r4]!
 283     __ ldrb(r4, Address(r6, sp, lsl(), Address::ADD, Address::pre)); // ldrb r4, [r6, sp]!
 284     __ ldrh(r6, Address(r7, r10, lsl(), Address::ADD, Address::post)); //   ldrh r6, [r7], r10
 285     __ ldrsb(r0, Address(r6, r11, lsl(), Address::ADD, Address::pre)); //   ldrsb r0, [r6, r11]!
 286     __ ldrsh(r10, Address(r6, sp, lsl(), Address::ADD, Address::post)); //  ldrsh r10, [r6], sp
 287     __ str(r7, Address(r3, r12, lsl(), Address::ADD, Address::off)); // str r7, [r3, r12]
 288     __ strb(r3, Address(r8, r1, lsl(), Address::ADD, Address::pre)); // strb r3, [r8, r1]!
 289     __ strh(r4, Address(r12, 64));                     //   strh r4, [r12, #64]
 290 
 291     __ bind(near);
 292 
 293 // LitMemoryOp
 294     __ ldr(r1, near);                                  //   ldr r1, near
 295     __ ldrb(r7, __ pc());                              //   ldrb r7, .
 296     __ ldrh(r2, near);                                 //   ldrh r2, near
 297     __ ldrsb(r10, __ pc());                            //   ldrsb r10, .
 298     __ ldrsh(lr, near_post);                           //   ldrsh lr, near_post
 299 
 300 // LitMemoryOp
 301     __ ldr(r2, __ pc());                               //   ldr r2, .
 302     __ ldrb(r3, __ pc());                              //   ldrb r3, .
 303     __ ldrh(r7, near_post);                            //   ldrh r7, near_post
 304     __ ldrsb(sp, __ pc());                             //   ldrsb sp, .
 305     __ ldrsh(r10, near);                               //   ldrsh r10, near
 306 
 307 // LitMemoryOp
 308     __ ldr(r5, __ pc());                               //   ldr r5, .
 309     __ ldrb(lr, near_post);                            //   ldrb lr, near_post
 310     __ ldrh(r5, near_post);                            //   ldrh r5, near_post
 311     __ ldrsb(r6, near);                                //   ldrsb r6, near
 312     __ ldrsh(r11, near);                               //   ldrsh r11, near
 313 
 314 // LitMemoryOp
 315     __ ldr(r7, near_post);                             //   ldr r7, near_post
 316     __ ldrb(r5, near_post);                            //   ldrb r5, near_post
 317     __ ldrh(r10, near);                                //   ldrh r10, near
 318     __ ldrsb(r6, near_post);                           //   ldrsb r6, near_post
 319     __ ldrsh(r9, __ pc());                             //   ldrsh r9, .
 320 
 321     __ bind(near_post);
 322 
 323 // MemoryRegRegSftOp
 324     __ ldr(r0, Address(r0, r10, ::ror(6), Address::ADD, Address::post)); // ldr r0, [r0], r10, ror #6
 325     __ ldrb(r3, Address(r8, lr, ::lsl(9), Address::ADD, Address::off)); //  ldrb r3, [r8, lr, lsl #9]
 326     __ str(r5, Address(sp, r3, ::lsl(15), Address::ADD, Address::off)); //  str r5, [sp, r3, lsl #15]
 327     __ strb(r9, Address(r9, r5, ::asr(2), Address::ADD, Address::post)); // strb r9, [r9], r5, asr #2
 328 
 329 // MemoryRegRegSftOp
 330     __ ldr(r5, Address(r4, r0, ::ror(6), Address::ADD, Address::off)); //   ldr r5, [r4, r0, ror #6]
 331     __ ldrb(lr, Address(r0, r4, ::lsr(9), Address::ADD, Address::off)); //  ldrb lr, [r0, r4, lsr #9]
 332     __ str(r5, Address(r12, r12, ::asr(5), Address::ADD, Address::post)); //    str r5, [r12], r12, asr #5
 333     __ strb(r3, Address(r1, r7, ::ror(12), Address::ADD, Address::pre)); // strb r3, [r1, r7, ror #12]!
 334 
 335 // MemoryRegRegSftOp
 336     __ ldr(r6, Address(r2, r3, ::rrx(), Address::ADD, Address::pre)); //    ldr r6, [r2, r3, rrx]!
 337     __ ldrb(r8, Address(lr, r2, ::asr(16), Address::ADD, Address::pre)); // ldrb r8, [lr, r2, asr #16]!
 338     __ str(r6, Address(r3, r6, ::ror(7), Address::ADD, Address::pre)); //   str r6, [r3, r6, ror #7]!
 339     __ strb(r3, Address(r8, r2, ::lsl(10), Address::ADD, Address::off)); // strb r3, [r8, r2, lsl #10]
 340 
 341 // MemoryRegRegSftOp
 342     __ ldr(r11, Address(sp, lr, ::lsl(8), Address::ADD, Address::off)); //  ldr r11, [sp, lr, lsl #8]
 343     __ ldrb(r10, Address(sp, r12, ::lsl(4), Address::ADD, Address::pre)); //    ldrb r10, [sp, r12, lsl #4]!
 344     __ str(sp, Address(r9, r2, ::asr(2), Address::ADD, Address::off)); //   str sp, [r9, r2, asr #2]
 345     __ strb(r7, Address(r11, lr, ::asr(14), Address::ADD, Address::pre)); //    strb r7, [r11, lr, asr #14]!
 346 
 347 // LdStOne
 348     __ ldrex(r12, r11);                                //   ldrex r12, [r11]
 349     __ ldrexb(r4, r12);                                //   ldrexb r4, [r12]
 350     __ ldrexh(r11, r11);                               //   ldrexh r11, [r11]
 351 
 352 // LdStTwo
 353     __ strex(r1, r7, lr);                              //   strex r1, r7, [lr]
 354     __ strexb(r12, r6, r4);                            //   strexb r12, r6, [r4]
 355     __ strexh(r4, r6, r7, Assembler::HS);              //   strexhHS r4, r6, [r7]
 356 
 357 // ThreeRegNon
 358     __ sadd16(r3, r4, r7);                             //   sadd16 r3, r4, r7
 359     __ sasx(r9, r10, r3, Assembler::AL);               //   sasxAL r9, r10, r3
 360     __ ssax(r12, r5, r6);                              //   ssax r12, r5, r6
 361     __ ssub16(r12, r5, lr);                            //   ssub16 r12, r5, lr
 362     __ sadd8(r0, r10, r7);                             //   sadd8 r0, r10, r7
 363     __ ssub8(r0, r8, r2, Assembler::VS);               //   ssub8VS r0, r8, r2
 364     __ qadd16(r11, r4, r5, Assembler::PL);             //   qadd16PL r11, r4, r5
 365     __ qasx(r11, r3, r12, Assembler::VS);              //   qasxVS r11, r3, r12
 366     __ qsax(r0, r3, r5);                               //   qsax r0, r3, r5
 367     __ ssub16(r10, r12, r5, Assembler::AL);            //   ssub16AL r10, r12, r5
 368     __ qadd8(r10, r6, lr, Assembler::CC);              //   qadd8CC r10, r6, lr
 369     __ qsub8(r10, r11, r7);                            //   qsub8 r10, r11, r7
 370     __ shadd16(r9, r4, lr, Assembler::PL);             //   shadd16PL r9, r4, lr
 371     __ shasx(r1, lr, r7);                              //   shasx r1, lr, r7
 372     __ shsax(r9, r11, r5, Assembler::LO);              //   shsaxLO r9, r11, r5
 373     __ shsub16(r3, r1, r11, Assembler::GE);            //   shsub16GE r3, r1, r11
 374     __ shadd8(sp, r5, r7, Assembler::GT);              //   shadd8GT sp, r5, r7
 375     __ shsub8(r1, r5, r7);                             //   shsub8 r1, r5, r7
 376 
 377 // ThreeRegNon
 378     __ uadd16(r10, r4, r7);                            //   uadd16 r10, r4, r7
 379     __ uasx(r1, r9, r7, Assembler::HS);                //   uasxHS r1, r9, r7
 380     __ usax(r11, sp, r7);                              //   usax r11, sp, r7
 381     __ usub16(r11, r4, lr);                            //   usub16 r11, r4, lr
 382     __ uadd8(r2, sp, r7, Assembler::LO);               //   uadd8LO r2, sp, r7
 383     __ usub8(r8, r10, lr, Assembler::GT);              //   usub8GT r8, r10, lr
 384     __ uqadd16(r3, r12, sp);                           //   uqadd16 r3, r12, sp
 385     __ uqasx(r4, sp, r6);                              //   uqasx r4, sp, r6
 386     __ uqsax(r1, r10, lr);                             //   uqsax r1, r10, lr
 387     __ uqsub16(r2, sp, lr, Assembler::LE);             //   uqsub16LE r2, sp, lr
 388     __ uqadd8(r1, r12, r5);                            //   uqadd8 r1, r12, r5
 389     __ uqsub8(r0, r4, sp, Assembler::GT);              //   uqsub8GT r0, r4, sp
 390     __ uhadd16(r0, r10, r5, Assembler::HI);            //   uhadd16HI r0, r10, r5
 391     __ uhasx(r11, r4, r7, Assembler::LE);              //   uhasxLE r11, r4, r7
 392     __ uhsax(r1, lr, r9, Assembler::GE);               //   uhsaxGE r1, lr, r9
 393     __ uhsub16(r2, r11, lr);                           //   uhsub16 r2, r11, lr
 394     __ uhadd8(r9, r4, r5, Assembler::GE);              //   uhadd8GE r9, r4, r5
 395     __ uhsub8(r2, sp, lr, Assembler::HI);              //   uhsub8HI r2, sp, lr
 396 
 397 // PKUPSATREV
 398     __ sxtab16(r10, r3, r7, ::ror(16));                //   sxtab16 r10, r3, r7, ROR #16
 399     __ sxtab(r9, r5, r7, ::ror(24), Assembler::CS);    //   sxtabCS r9, r5, r7, ROR #24
 400     __ sxtah(r3, r5, r7, ::ror(8));                    //   sxtah r3, r5, r7, ROR #8
 401     __ uxtab16(r8, r4, r6, ::ror(8), Assembler::AL);   //   uxtab16AL r8, r4, r6, ROR #8
 402     __ uxtab(r0, r11, sp, ::rrx(), Assembler::EQ);     //   uxtabEQ r0, r11, sp, ROR #0
 403     __ uxtah(r9, r12, r5, ::rrx());                    //   uxtah r9, r12, r5, ROR #0
 404 
 405 // PKUPSATREV
 406     __ sxtb16(r3, r11, ::ror(16), Assembler::GE);      //   sxtb16GE r3, r11, ROR #16
 407     __ sxtb(r2, r6, ::rrx(), Assembler::HI);           //   sxtbHI r2, r6, ROR #0
 408     __ sxth(r3, sp, ::ror(24), Assembler::GT);         //   sxthGT r3, sp, ROR #24
 409     __ uxtb16(r12, r5, ::ror(16));                     //   uxtb16 r12, r5, ROR #16
 410     __ uxtb(r12, r5, ::ror(16));                       //   uxtb r12, r5, ROR #16
 411     __ uxth(r8, r5, ::ror(16));                        //   uxth r8, r5, ROR #16
 412 
 413 // TwoRegNon
 414     __ rev(r10, r4, Assembler::EQ);                    //   revEQ r10, r4
 415     __ rev16(r8, r12, Assembler::GE);                  //   rev16GE r8, r12
 416     __ rbit(lr, r7);                                   //   rbit lr, r7
 417     __ revsh(sp, r7, Assembler::GT);                   //   revshGT sp, r7
 418 
 419 // ThreeRegNon
 420     __ sdiv(r9, sp, lr);                               //   sdiv r9, sp, lr
 421     __ udiv(r2, r12, r6);                              //   udiv r2, r12, r6
 422 
 423 // TwoRegTwoImm
 424     __ sbfx(r0, r1, (unsigned)20U, (unsigned)3U, Assembler::MI); // sbfxMI r0, r1, #20, #3
 425     __ ubfx(r9, r2, (unsigned)16U, (unsigned)15U);     //   ubfx r9, r2, #16, #15
 426     __ bfi(r1, r11, (unsigned)27U, (unsigned)3U, Assembler::HI); // bfiHI r1, r11, #27, #3
 427 
 428 // TwoRegTwoImm
 429     __ bfc(r3, (unsigned)7U, (unsigned)10U);           //   bfc r3, #7, #10
 430 
 431 // MultipleMemOp
 432     __ stmda(r6, 3435U, false);                        //   stmda r6, {r0, r1, r3, r5, r6, r8, r10, r11}
 433     __ stmed(r4, 14559U, false);                       //   stmed r4, {r0, r1, r2, r3, r4, r6, r7, r11, r12, sp}
 434     __ ldmda(r0, 57812U, false);                       //   ldmda r0, {r2, r4, r6, r7, r8, sp, lr, pc}
 435     __ ldmfa(r12, 39027U, true);                       //   ldmfa r12!, {r0, r1, r4, r5, r6, r11, r12, pc}
 436     __ stmia(r9, 12733U, true);                        //   stmia r9!, {r0, r2, r3, r4, r5, r7, r8, r12, sp}
 437     __ stmea(r11, 21955U, false);                      //   stmea r11, {r0, r1, r6, r7, r8, r10, r12, lr}
 438     __ ldmia(r12, 48418U, true);                       //   ldmia r12!, {r1, r5, r8, r10, r11, r12, sp, pc}
 439     __ ldmfd(sp, 41226U, true);                        //   ldmfd sp!, {r1, r3, r8, sp, pc}
 440     __ stmdb(r11, 8729U, true);                        //   stmdb r11!, {r0, r3, r4, r9, sp}
 441     __ stmfd(r9, 36309U, true);                        //   stmfd r9!, {r0, r2, r4, r6, r7, r8, r10, r11, pc}
 442     __ ldmdb(r5, 24667U, true);                        //   ldmdb r5!, {r0, r1, r3, r4, r6, sp, lr}
 443     __ ldmea(r1, 37287U, false);                       //   ldmea r1, {r0, r1, r2, r5, r7, r8, r12, pc}
 444     __ stmib(r11, 28266U, true);                       //   stmib r11!, {r1, r3, r5, r6, r9, r10, r11, sp, lr}
 445     __ stmfa(r11, 17671U, false);                      //   stmfa r11, {r0, r1, r2, r8, r10, lr}
 446     __ ldmib(r0, 21452U, true);                        //   ldmib r0!, {r2, r3, r6, r7, r8, r9, r12, lr}
 447     __ ldmed(r1, 11751U, false);                       //   ldmed r1, {r0, r1, r2, r5, r6, r7, r8, r10, r11, sp}
 448 
 449 // BranchLabel
 450     __ b(forth, Assembler::CS);                        //   bCS forth
 451     __ bl(__ pc(), Assembler::MI);                     //   blMI .
 452 
 453 // OneRegNon
 454     __ b(r0, Assembler::VS);                           //   bxVS r0
 455     __ bl(r3);                                         //   blx r3
 456 
 457 // BranchLabel
 458     __ b(__ pc(), Assembler::AL);                      //   bAL .
 459     __ bl(__ pc());                                    //   bl .
 460 
 461 // OneRegNon
 462     __ b(r0, Assembler::VS);                           //   bxVS r0
 463     __ bl(r5);                                         //   blx r5
 464 
 465 // BranchLabel
 466     __ b(forth, Assembler::LE);                        //   bLE forth
 467     __ bl(__ pc(), Assembler::MI);                     //   blMI .
 468 
 469 // OneRegNon
 470     __ b(r9, Assembler::NE);                           //   bxNE r9
 471     __ bl(r12);                                        //   blx r12
 472 
 473 // BranchLabel
 474     __ b(back);                                        //   b back
 475     __ bl(__ pc(), Assembler::HI);                     //   blHI .
 476 
 477 // OneRegNon
 478     __ b(r1, Assembler::VC);                           //   bxVC r1
 479     __ bl(r7, Assembler::GT);                          //   blxGT r7
 480 
 481 // BranchLabel
 482     __ b(back, Assembler::GE);                         //   bGE back
 483     __ bl(__ pc(), Assembler::HI);                     //   blHI .
 484 
 485 // OneRegNon
 486     __ b(r12);                                         //   bx r12
 487     __ bl(r7, Assembler::CC);                          //   blxCC r7
 488 
 489 // BranchLabel
 490     __ b(__ pc());                                     //   b .
 491     __ bl(back, Assembler::GT);                        //   blGT back
 492 
 493 // OneRegNon
 494     __ b(r1, Assembler::GE);                           //   bxGE r1
 495     __ bl(r0);                                         //   blx r0
 496 
 497 // BranchLabel
 498     __ b(__ pc());                                     //   b .
 499     __ bl(forth);                                      //   bl forth
 500 
 501 // OneRegNon
 502     __ b(lr, Assembler::GT);                           //   bxGT lr
 503     __ bl(r11, Assembler::NE);                         //   blxNE r11
 504 
 505 // BranchLabel
 506     __ b(__ pc(), Assembler::CS);                      //   bCS .
 507     __ bl(__ pc());                                    //   bl .
 508 
 509 // OneRegNon
 510     __ b(r10, Assembler::HS);                          //   bxHS r10
 511     __ bl(r4);                                         //   blx r4
 512 
 513 // BranchLabel
 514     __ b(back, Assembler::AL);                         //   bAL back
 515     __ bl(__ pc());                                    //   bl .
 516 
 517 // OneRegNon
 518     __ b(r12, Assembler::LO);                          //   bxLO r12
 519     __ bl(r8);                                         //   blx r8
 520 
 521 // BranchLabel
 522     __ b(forth);                                       //   b forth
 523     __ bl(__ pc());                                    //   bl .
 524 
 525 // OneRegNon
 526     __ b(r10);                                         //   bx r10
 527     __ bl(r1);                                         //   blx r1
 528 
 529 // ThreeFltNon
 530     __ vmla_f32(f4, f8, f12, Assembler::MI);            //  vmlaMI.f32 s4, s8, s12
 531     __ vmls_f32(f4, f10, f10);                           // vmls.f32 s4, s10, s10
 532     __ vnmla_f32(f2, f10, f12);                          // vnmla.f32 s2, s10, s12
 533     __ vnmls_f32(f8, f6, f8, Assembler::LT);           //   vnmlsLT.f32 s8, s6, s8
 534     __ vnmul_f32(f6, f12, f14, Assembler::MI);           // vnmulMI.f32 s6, s12, s14
 535     __ vadd_f32(f0, f2, f0);                           //   vadd.f32 s0, s2, s0
 536     __ vsub_f32(f2, f4, f10, Assembler::AL);            //  vsubAL.f32 s2, s4, s10
 537     __ vdiv_f32(f0, f2, f12, Assembler::CS);            //  vdivCS.f32 s0, s2, s12
 538 
 539 // ThreeFltNon
 540     __ vmla_f64(d0, d3, d6);                           //   vmla.f64 d0, d3, d6
 541     __ vmls_f64(d0, d1, d5);                           //   vmls.f64 d0, d1, d5
 542     __ vnmla_f64(d1, d4, d6);                          //   vnmla.f64 d1, d4, d6
 543     __ vnmls_f64(d0, d1, d1, Assembler::NE);           //   vnmlsNE.f64 d0, d1, d1
 544     __ vnmul_f64(d3, d5, d5, Assembler::NE);           //   vnmulNE.f64 d3, d5, d5
 545     __ vadd_f64(d0, d2, d4, Assembler::LO);            //   vaddLO.f64 d0, d2, d4
 546     __ vsub_f64(d1, d2, d4);                           //   vsub.f64 d1, d2, d4
 547     __ vdiv_f64(d0, d1, d5, Assembler::MI);            //   vdivMI.f64 d0, d1, d5
 548 
 549 // TwoFltNon
 550     __ vabs_f32(f6, f6);                               //   vabs.f32 s6, s6
 551     __ vneg_f32(f6, f8, Assembler::PL);                //   vnegPL.f32 s6, s8
 552     __ vsqrt_f32(f0, f8);                              //   vsqrt.f32 s0, s8
 553 
 554 // TwoFltNon
 555     __ vabs_f64(d0, d4);                               //   vabs.f64 d0, d4
 556     __ vneg_f64(d1, d4);                               //   vneg.f64 d1, d4
 557     __ vsqrt_f64(d0, d1);                              //   vsqrt.f64 d0, d1
 558 
 559 // vmov_f32
 560     __ vmov_f32(f0, lr, Assembler::PL);                //   vmovPL.f32 s0, lr
 561 
 562 // vmov_f32
 563     __ vmov_f32(r11, f8);                              //   vmov.f32 r11, s8
 564 
 565 // vmov_f64
 566     __ vmov_f64(d1, r11, lr, Assembler::LT);           //   vmovLT.f64 d1, r11, lr
 567 
 568 // vmov_f64
 569     __ vmov_f64(r7, r5, d5);                           //   vmov.f64 r7, r5, d5
 570 
 571 // vmov_f32
 572     __ vmov_f32(f8, f12);                               //  vmov.f32 s8, s12
 573 
 574 // vmov_f64
 575     __ vmov_f64(d1, d2, Assembler::HI);                //   vmovHI.f64 d1, d2
 576 
 577 // vmov_f32
 578     __ vmov_f32(f4, 1.0f, Assembler::VS);              //   vmovVS.f32 s4, #1.0
 579 
 580 // vmov_f64
 581     __ vmov_f64(d2, 1.0);                              //   vmov.f64 d2, #1.0
 582 
 583 // vmov_f32
 584     __ vmov_f32(f6, 2.0f);                             //   vmov.f32 s6, #2.0
 585 
 586 // vmov_f64
 587     __ vmov_f64(d1, 2.0);                              //   vmov.f64 d1, #2.0
 588 
 589 // vector memory
 590     __ vldr_f32(f4, Address(r5, 116));                 //   vldr.f32 s4, [r5, #116]
 591     __ vstr_f32(f2, Address(r1, 56), Assembler::CC);   //   vstrCC.f32 s2, [r1, #56]
 592 
 593 // vector memory
 594     __ vldr_f64(d7, Address(r5, 16), Assembler::NE);   //   vldrNE.f64 d7, [r5, #16]
 595     __ vstr_f64(d6, Address(r1, 228));                 //   vstr.f64 d6, [r1, #228]
 596 
 597     __ bind(near_flt);
 598 
 599 // vector memory
 600     __ vldr_f32(f2, near_post_flt);                    //   vldr.f32 s2, near_post_flt
 601     __ vstr_f32(f6, near_post_flt);                    //   vstr.f32 s6, near_post_flt
 602 
 603 // vector memory
 604     __ vldr_f64(d2, near_flt, Assembler::LT);          //   vldrLT.f64 d2, near_flt
 605     __ vstr_f64(d3, __ pc(), Assembler::GT);           //   vstrGT.f64 d3, .
 606 
 607 // vector memory
 608     __ vldr_f32(f4, near_post_flt, Assembler::CC);     //   vldrCC.f32 s4, near_post_flt
 609     __ vstr_f32(f0, near_post_flt);                    //   vstr.f32 s0, near_post_flt
 610 
 611 // vector memory
 612     __ vldr_f64(d4, near_post_flt, Assembler::GT);     //   vldrGT.f64 d4, near_post_flt
 613     __ vstr_f64(d0, near_flt);                         //   vstr.f64 d0, near_flt
 614 
 615 // vector memory
 616     __ vldr_f32(f8, near_post_flt);                    //   vldr.f32 s8, near_post_flt
 617     __ vstr_f32(f6, near_post_flt);                    //   vstr.f32 s6, near_post_flt
 618 
 619 // vector memory
 620     __ vldr_f64(d4, near_flt, Assembler::PL);          //   vldrPL.f64 d4, near_flt
 621     __ vstr_f64(d5, near_flt);                         //   vstr.f64 d5, near_flt
 622 
 623 // vector memory
 624     __ vldr_f32(f8, near_post_flt, Assembler::LS);     //   vldrLS.f32 s8, near_post_flt
 625     __ vstr_f32(f12, __ pc(), Assembler::CC);           //  vstrCC.f32 s12, .
 626 
 627 // vector memory
 628     __ vldr_f64(d6, near_post_flt, Assembler::AL);     //   vldrAL.f64 d6, near_post_flt
 629     __ vstr_f64(d1, near_post_flt, Assembler::LT);     //   vstrLT.f64 d1, near_post_flt
 630 
 631     __ bind(near_post_flt);
 632 
 633 // FltMultMemOp
 634     __ vldmia_f32(r1, FloatRegSet::of(f4).bits(), false);                      //   vldmia.f32 r1, {s4}
 635     __ vstmia_f32(r6, FloatRegSet::of(f4).bits(), true, Assembler::CS);        //   vstmiaCS.f32 r6!, {s4}
 636 
 637 // DblMultMemOp
 638     __ vldmia_f64(r9, DoubleFloatRegSet::of(d1, d2, d3, d4).bits(), true);     //   vldmia.f64 r9!, {d1, d2, d3, d4}
 639     __ vstmia_f64(r3, DoubleFloatRegSet::of(d6, d7).bits(), true);             //   vstmia.f64 r3!, {d6, d7}
 640 
 641 // FltMultMemOp
 642     __ vldmdb_f32(r2, FloatRegSet::of(f6).bits(), Assembler::VS);              //   vldmdbVS.f32 r2!, {s6}
 643     __ vstmdb_f32(r6, FloatRegSet::of(f14).bits());                            //   vstmdb.f32 r6!, {s14}
 644 
 645 // DblMultMemOp
 646     __ vldmdb_f64(sp, DoubleFloatRegSet::of(d4, d5, d6, d7).bits());           //   vldmdb.f64 sp!, {d4, d5, d6, d7}
 647     __ vstmdb_f64(r0, DoubleFloatRegSet::of(d5, d6, d7).bits());               //   vstmdb.f64 r0!, {d5, d6, d7}
 648 
 649 // vcmp_f32
 650     __ vcmp_f32(f2, f2);                               //   vcmp.f32 s2, s2
 651 
 652 // vcmpe_f32
 653     __ vcmpe_f32(f8, f8, Assembler::VC);               //   vcmpeVC.f32 s8, s8
 654 
 655 // vcmp_f64
 656     __ vcmp_f64(d0, d6);                               //   vcmp.f64 d0, d6
 657 
 658 // vcmpe_f64
 659     __ vcmpe_f64(d3, d7, Assembler::GE);               //   vcmpeGE.f64 d3, d7
 660 
 661 // vcmp_f32
 662     __ vcmp_f32(f2, 0.0f, Assembler::LT);              //   vcmpLT.f32 s2, #0.0
 663 
 664 // vcmpe_f32
 665     __ vcmpe_f32(f14, 0.0f, Assembler::GT);             //  vcmpeGT.f32 s14, #0.0
 666 
 667 // vcmp_f64
 668     __ vcmp_f64(d4, 0.0);                              //   vcmp.f64 d4, #0.0
 669 
 670 // vcmpe_f64
 671     __ vcmpe_f64(d1, 0.0);                             //   vcmpe.f64 d1, #0.0
 672 
 673 // vcvt
 674     __ vcvt_s32_f32(f2, f6, Assembler::VS);            //   vcvtVS.s32.f32 s2, s6
 675     __ vcvt_u32_f32(f6, f14, Assembler::GT);            //  vcvtGT.u32.f32 s6, s14
 676     __ vcvt_f32_s32(f0, f2, Assembler::CC);            //   vcvtCC.f32.s32 s0, s2
 677     __ vcvt_f32_u32(f2, f4, Assembler::CC);            //   vcvtCC.f32.u32 s2, s4
 678 
 679 // vcvt
 680     __ vcvt_s32_f64(f4, d4, Assembler::HI);            //   vcvtHI.s32.f64 s4, d4
 681     __ vcvt_u32_f64(f6, d6, Assembler::HI);            //   vcvtHI.u32.f64 s6, d6
 682     __ vcvt_f32_f64(f6, d7, Assembler::LS);            //   vcvtLS.f32.f64 s6, d7
 683 
 684 // vcvt
 685     __ vcvt_f64_s32(d3, f8);                           //   vcvt.f64.s32 d3, s8
 686     __ vcvt_f64_u32(d5, f14, Assembler::EQ);            //  vcvtEQ.f64.u32 d5, s14
 687     __ vcvt_f64_f32(d4, f10, Assembler::AL);            //  vcvtAL.f64.f32 d4, s10
 688 
 689 // BKPT
 690     __ bkpt((unsigned)26U);                            //   bkpt #26
 691 
 692     __ bind(forth);
 693 
 694 /*
 695 aarch32ops.o:     file format elf32-littlearm
 696 
 697 
 698 Disassembly of section .text:
 699 
 700 00000000 <back>:
 701    0:   e082852b    add r8, r2, fp, lsr #10
 702    4:   009310c7    addseq  r1, r3, r7, asr #1
 703    8:   e0290284    eor r0, r9, r4, lsl #5
 704    c:   c0329066    eorsgt  r9, r2, r6, rrx
 705   10:   c04c000e    subgt   r0, ip, lr
 706   14:   00528364    subseq  r8, r2, r4, ror #6
 707   18:   e069818d    rsb r8, r9, sp, lsl #3
 708   1c:   60708864    rsbsvs  r8, r0, r4, ror #16
 709   20:   d08597a1    addle   r9, r5, r1, lsr #15
 710   24:   e09d12c6    adds    r1, sp, r6, asr #5
 711   28:   c0adb0c7    adcgt   fp, sp, r7, asr #1
 712   2c:   e0b80329    adcs    r0, r8, r9, lsr #6
 713   30:   e0c392e6    sbc r9, r3, r6, ror #5
 714   34:   80dd1845    sbcshi  r1, sp, r5, asr #16
 715   38:   30e28486    rsccc   r8, r2, r6, lsl #9
 716   3c:   e0f4a76d    rscs    sl, r4, sp, ror #14
 717   40:   118db785    orrne   fp, sp, r5, lsl #15
 718   44:   e19a9764    orrs    r9, sl, r4, ror #14
 719   48:   e1cd90e5    bic r9, sp, r5, ror #1
 720   4c:   e1d20547    bics    r0, r2, r7, asr #10
 721   50:   e086d777    add sp, r6, r7, ror r7
 722   54:   809c4776    addshi  r4, ip, r6, ror r7
 723   58:   90265c57    eorls   r5, r6, r7, asr ip
 724   5c:   e035841d    eors    r8, r5, sp, lsl r4
 725   60:   e04c2055    sub r2, ip, r5, asr r0
 726   64:   20539c17    subscs  r9, r3, r7, lsl ip
 727   68:   c06c9614    rsbgt   r9, ip, r4, lsl r6
 728   6c:   e072811c    rsbs    r8, r2, ip, lsl r1
 729   70:   e08c4d1d    add r4, ip, sp, lsl sp
 730   74:   e09b8d76    adds    r8, fp, r6, ror sp
 731   78:   10a20415    adcne   r0, r2, r5, lsl r4
 732   7c:   e0beb256    adcs    fp, lr, r6, asr r2
 733   80:   80ca835e    sbchi   r8, sl, lr, asr r3
 734   84:   e0dc1615    sbcs    r1, ip, r5, lsl r6
 735   88:   60e54a7e    rscvs   r4, r5, lr, ror sl
 736   8c:   e0fc181d    rscs    r1, ip, sp, lsl r8
 737   90:   61818076    orrvs   r8, r1, r6, ror r0
 738   94:   e19db577    orrs    fp, sp, r7, ror r5
 739   98:   e1ce4216    bic r4, lr, r6, lsl r2
 740   9c:   e1dba31d    bics    sl, fp, sp, lsl r3
 741   a0:   828d8261    addhi   r8, sp, #268435462  ; 0x10000006
 742   a4:   e29ed69b    adds    sp, lr, #162529280  ; 0x9b00000
 743   a8:   e226e87d    eor lr, r6, #8192000    ; 0x7d0000
 744   ac:   e2332f49    eors    r2, r3, #292    ; 0x124
 745   b0:   e24d46d9    sub r4, sp, #227540992  ; 0xd900000
 746   b4:   b25e1402    subslt  r1, lr, #33554432   ; 0x2000000
 747   b8:   e2650325    rsb r0, r5, #-1811939328    ; 0x94000000
 748   bc:   3274882f    rsbscc  r8, r4, #3080192    ; 0x2f0000
 749   c0:   b2849102    addlt   r9, r4, #-2147483648    ; 0x80000000
 750   c4:   e2948902    adds    r8, r4, #32768  ; 0x8000
 751   c8:   22aeac2a    adccs   sl, lr, #10752  ; 0x2a00
 752   cc:   e2b6aabd    adcs    sl, r6, #774144 ; 0xbd000
 753   d0:   e2cc2426    sbc r2, ip, #637534208  ; 0x26000000
 754   d4:   e2da85a5    sbcs    r8, sl, #692060160  ; 0x29400000
 755   d8:   e2e6d871    rsc sp, r6, #7405568    ; 0x710000
 756   dc:   12fba6e9    rscsne  sl, fp, #244318208  ; 0xe900000
 757   e0:   638737ff    orrvs   r3, r7, #66846720   ; 0x3fc0000
 758   e4:   03952951    orrseq  r2, r5, #1327104    ; 0x144000
 759   e8:   63c18eea    bicvs   r8, r1, #3744   ; 0xea0
 760   ec:   33d2020a    bicscc  r0, r2, #-1610612736    ; 0xa0000000
 761   f0:   e118028d    tst r8, sp, lsl #5
 762   f4:   e13601a7    teq r6, r7, lsr #3
 763   f8:   e15c0164    cmp ip, r4, ror #2
 764   fc:   b1750807    cmnlt   r5, r7, lsl #16
 765  100:   e112073e    tst r2, lr, lsr r7
 766  104:   31300572    teqcc   r0, r2, ror r5
 767  108:   915e0b37    cmpls   lr, r7, lsr fp
 768  10c:   617a0b17    cmnvs   sl, r7, lsl fp
 769  110:   e3120585    tst r2, #557842432  ; 0x21400000
 770  114:   433e071b    teqmi   lr, #7077888    ; 0x6c0000
 771  118:   e355030e    cmp r5, #939524096  ; 0x38000000
 772  11c:   3377010a    cmncc   r7, #-2147483646    ; 0x80000002
 773  120:   e1a00b84    lsl r0, r4, #23
 774  124:   e1b01484    lsls    r1, r4, #9
 775  128:   e1a001aa    lsr r0, sl, #3
 776  12c:   e1b00a2a    lsrs    r0, sl, #20
 777  130:   e1a015c9    asr r1, r9, #11
 778  134:   61b0254b    asrsvs  r2, fp, #10
 779  138:   31a08fe2    rorcc   r8, r2, #31
 780  13c:   e1b0946c    rors    r9, ip, #8
 781  140:   e1a0877e    ror r8, lr, r7
 782  144:   e1b0c473    rors    ip, r3, r4
 783  148:   c1a0ce1d    lslgt   ip, sp, lr
 784  14c:   e1b0c61d    lsls    ip, sp, r6
 785  150:   c1a00931    lsrgt   r0, r1, r9
 786  154:   c1b0bc33    lsrsgt  fp, r3, ip
 787  158:   d1a0265c    asrle   r2, ip, r6
 788  15c:   b1b0165a    asrslt  r1, sl, r6
 789  160:   e1a0a003    mov sl, r3
 790  164:   e1b00009    movs    r0, r9
 791  168:   73a03e29    movvc   r3, #656    ; 0x290
 792  16c:   e3b0497e    movs    r4, #2064384    ; 0x1f8000
 793  170:   e1a0c1a6    lsr ip, r6, #3
 794  174:   71b0554d    asrsvc  r5, sp, #10
 795  178:   e1a0137e    ror r1, lr, r3
 796  17c:   01b0897c    rorseq  r8, ip, r9
 797  180:   330cbf31    movwcc  fp, #53041  ; 0xcf31
 798  184:   33429bf7    movtcc  r9, #11255  ; 0x2bf7
 799  188:   d001059d    mulle   r1, sp, r5
 800  18c:   e0100b9a    muls    r0, sl, fp
 801  190:   e0207c93    mla r0, r3, ip, r7
 802  194:   0038639b    mlaseq  r8, fp, r3, r6
 803  198:   e084e695    umull   lr, r4, r5, r6
 804  19c:   e0940796    umulls  r0, r4, r6, r7
 805  1a0:   e0a08e9b    umlal   r8, r0, fp, lr
 806  1a4:   e0b4b79e    umlals  fp, r4, lr, r7
 807  1a8:   20c51796    smullcs r1, r5, r6, r7
 808  1ac:   40db059c    smullsmi    r0, fp, ip, r5
 809  1b0:   e0498592    umaal   r8, r9, r2, r5
 810  1b4:   0060ed94    mlseq   r0, r4, sp, lr
 811  1b8:   510d9054    qaddpl  r9, r4, sp
 812  1bc:   4125005c    qsubmi  r0, ip, r5
 813  1c0:   e1473055    qdadd   r3, r5, r7
 814  1c4:   e1649052    qdsub   r9, r2, r4
 815  1c8:   e101658c    smlabb  r1, ip, r5, r6
 816  1cc:   e1006cca    smlabt  r0, sl, ip, r6
 817  1d0:   e108e3a1    smlatb  r8, r1, r3, lr
 818  1d4:   e10176ed    smlatt  r1, sp, r6, r7
 819  1d8:   e1206483    smlawb  r0, r3, r4, r6
 820  1dc:   e12b7ec4    smlawt  fp, r4, lr, r7
 821  1e0:   e14a0786    smlalbb r0, sl, r6, r7
 822  1e4:   914b3ec4    smlalbtls   r3, fp, r4, lr
 823  1e8:   e14b8ca3    smlaltb r8, fp, r3, ip
 824  1ec:   e14185e3    smlaltt r8, r1, r3, r5
 825  1f0:   21220dac    smulwbcs    r2, ip, sp
 826  1f4:   e12806ec    smulwt  r8, ip, r6
 827  1f8:   a1620e86    smulbbge    r2, r6, lr
 828  1fc:   e16807cc    smulbt  r8, ip, r7
 829  200:   016a0ea3    smultbeq    sl, r3, lr
 830  204:   e1600de3    smultt  r0, r3, sp
 831  208:   e697a009    ldr sl, [r7], r9
 832  20c:   e5d900c4    ldrb    r0, [r9, #196]  ; 0xc4
 833  210:   e1b4e0b6    ldrh    lr, [r4, r6]!
 834  214:   e1f96ed8    ldrsb   r6, [r9, #232]! ; 0xe8
 835  218:   e09120f1    ldrsh   r2, [r1], r1
 836  21c:   e6890004    str r0, [r9], r4
 837  220:   e5e5305c    strb    r3, [r5, #92]!  ; 0x5c
 838  224:   e1c82ab0    strh    r2, [r8, #160]  ; 0xa0
 839  228:   e79c8008    ldr r8, [ip, r8]
 840  22c:   e4dab010    ldrb    fp, [sl], #16
 841  230:   e19ab0b6    ldrh    fp, [sl, r6]
 842  234:   e1bb50da    ldrsb   r5, [fp, sl]!
 843  238:   e19360f7    ldrsh   r6, [r3, r7]
 844  23c:   e7ad7005    str r7, [sp, r5]!
 845  240:   e5ca2000    strb    r2, [sl]
 846  244:   e08460b3    strh    r6, [r4], r3
 847  248:   e59ca000    ldr sl, [ip]
 848  24c:   e4db4084    ldrb    r4, [fp], #132  ; 0x84
 849  250:   e09990bc    ldrh    r9, [r9], ip
 850  254:   e0d399d4    ldrsb   r9, [r3], #148  ; 0x94
 851  258:   e1f2b9f4    ldrsh   fp, [r2, #148]! ; 0x94
 852  25c:   e78db00b    str fp, [sp, fp]
 853  260:   e7cd100a    strb    r1, [sp, sl]
 854  264:   e08ea0b9    strh    sl, [lr], r9
 855  268:   e7b36004    ldr r6, [r3, r4]!
 856  26c:   e7f6400d    ldrb    r4, [r6, sp]!
 857  270:   e09760ba    ldrh    r6, [r7], sl
 858  274:   e1b600db    ldrsb   r0, [r6, fp]!
 859  278:   e096a0fd    ldrsh   sl, [r6], sp
 860  27c:   e783700c    str r7, [r3, ip]
 861  280:   e7e83001    strb    r3, [r8, r1]!
 862  284:   e1cc44b0    strh    r4, [ip, #64]   ; 0x40
 863 
 864 00000288 <near>:
 865  288:   e51f1008    ldr r1, [pc, #-8]   ; 288 <near>
 866  28c:   e55f7008    ldrb    r7, [pc, #-8]   ; 28c <near+0x4>
 867  290:   e15f21b0    ldrh    r2, [pc, #-16]  ; 288 <near>
 868  294:   e15fa0d8    ldrsb   sl, [pc, #-8]   ; 294 <near+0xc>
 869  298:   e1dfe3f8    ldrsh   lr, [pc, #56]   ; 2d8 <near_post>
 870  29c:   e51f2008    ldr r2, [pc, #-8]   ; 29c <near+0x14>
 871  2a0:   e55f3008    ldrb    r3, [pc, #-8]   ; 2a0 <near+0x18>
 872  2a4:   e1df72bc    ldrh    r7, [pc, #44]   ; 2d8 <near_post>
 873  2a8:   e15fd0d8    ldrsb   sp, [pc, #-8]   ; 2a8 <near+0x20>
 874  2ac:   e15fa2fc    ldrsh   sl, [pc, #-44]  ; 288 <near>
 875  2b0:   e51f5008    ldr r5, [pc, #-8]   ; 2b0 <near+0x28>
 876  2b4:   e5dfe01c    ldrb    lr, [pc, #28]   ; 2d8 <near_post>
 877  2b8:   e1df51b8    ldrh    r5, [pc, #24]   ; 2d8 <near_post>
 878  2bc:   e15f63dc    ldrsb   r6, [pc, #-60]  ; 288 <near>
 879  2c0:   e15fb4f0    ldrsh   fp, [pc, #-64]  ; 288 <near>
 880  2c4:   e59f700c    ldr r7, [pc, #12]   ; 2d8 <near_post>
 881  2c8:   e5df5008    ldrb    r5, [pc, #8]    ; 2d8 <near_post>
 882  2cc:   e15fa4bc    ldrh    sl, [pc, #-76]  ; 288 <near>
 883  2d0:   e1df60d0    ldrsb   r6, [pc]    ; 2d8 <near_post>
 884  2d4:   e15f90f8    ldrsh   r9, [pc, #-8]   ; 2d4 <near+0x4c>
 885 
 886 000002d8 <near_post>:
 887  2d8:   e690036a    ldr r0, [r0], sl, ror #6
 888  2dc:   e7d8348e    ldrb    r3, [r8, lr, lsl #9]
 889  2e0:   e78d5783    str r5, [sp, r3, lsl #15]
 890  2e4:   e6c99145    strb    r9, [r9], r5, asr #2
 891  2e8:   e7945360    ldr r5, [r4, r0, ror #6]
 892  2ec:   e7d0e4a4    ldrb    lr, [r0, r4, lsr #9]
 893  2f0:   e68c52cc    str r5, [ip], ip, asr #5
 894  2f4:   e7e13667    strb    r3, [r1, r7, ror #12]!
 895  2f8:   e7b26063    ldr r6, [r2, r3, rrx]!
 896  2fc:   e7fe8842    ldrb    r8, [lr, r2, asr #16]!
 897  300:   e7a363e6    str r6, [r3, r6, ror #7]!
 898  304:   e7c83502    strb    r3, [r8, r2, lsl #10]
 899  308:   e79db40e    ldr fp, [sp, lr, lsl #8]
 900  30c:   e7fda20c    ldrb    sl, [sp, ip, lsl #4]!
 901  310:   e789d142    str sp, [r9, r2, asr #2]
 902  314:   e7eb774e    strb    r7, [fp, lr, asr #14]!
 903  318:   e19bcf9f    ldrex   r12, [fp]
 904  31c:   e1dc4f9f    ldrexb  r4, [ip]
 905  320:   e1fbbf9f    ldrexh  fp, [fp]
 906  324:   e18e1f97    strex   r1, r7, [lr]
 907  328:   e1c4cf96    strexb  ip, r6, [r4]
 908  32c:   21e74f96    strexhcs    r4, r6, [r7]
 909  330:   e6143f17    sadd16  r3, r4, r7
 910  334:   e61a9f33    sasx    r9, sl, r3
 911  338:   e615cf56    ssax    ip, r5, r6
 912  33c:   e615cf7e    ssub16  ip, r5, lr
 913  340:   e61a0f97    sadd8   r0, sl, r7
 914  344:   66180ff2    ssub8vs r0, r8, r2
 915  348:   5624bf15    qadd16pl    fp, r4, r5
 916  34c:   6623bf3c    qasxvs  fp, r3, ip
 917  350:   e6230f55    qsax    r0, r3, r5
 918  354:   e61caf75    ssub16  sl, ip, r5
 919  358:   3626af9e    qadd8cc sl, r6, lr
 920  35c:   e62baff7    qsub8   sl, fp, r7
 921  360:   56349f1e    shadd16pl   r9, r4, lr
 922  364:   e63e1f37    shasx   r1, lr, r7
 923  368:   363b9f55    shsaxcc r9, fp, r5
 924  36c:   a6313f7b    shsub16ge   r3, r1, fp
 925  370:   c635df97    shadd8gt    sp, r5, r7
 926  374:   e6351ff7    shsub8  r1, r5, r7
 927  378:   e654af17    uadd16  sl, r4, r7
 928  37c:   26591f37    uasxcs  r1, r9, r7
 929  380:   e65dbf57    usax    fp, sp, r7
 930  384:   e654bf7e    usub16  fp, r4, lr
 931  388:   365d2f97    uadd8cc r2, sp, r7
 932  38c:   c65a8ffe    usub8gt r8, sl, lr
 933  390:   e66c3f1d    uqadd16 r3, ip, sp
 934  394:   e66d4f36    uqasx   r4, sp, r6
 935  398:   e66a1f5e    uqsax   r1, sl, lr
 936  39c:   d66d2f7e    uqsub16le   r2, sp, lr
 937  3a0:   e66c1f95    uqadd8  r1, ip, r5
 938  3a4:   c6640ffd    uqsub8gt    r0, r4, sp
 939  3a8:   867a0f15    uhadd16hi   r0, sl, r5
 940  3ac:   d674bf37    uhasxle fp, r4, r7
 941  3b0:   a67e1f59    uhsaxge r1, lr, r9
 942  3b4:   e67b2f7e    uhsub16 r2, fp, lr
 943  3b8:   a6749f95    uhadd8ge    r9, r4, r5
 944  3bc:   867d2ffe    uhsub8hi    r2, sp, lr
 945  3c0:   e683a877    sxtab16 sl, r3, r7, ror #16
 946  3c4:   26a59c77    sxtabcs r9, r5, r7, ror #24
 947  3c8:   e6b53477    sxtah   r3, r5, r7, ror #8
 948  3cc:   e6c48476    uxtab16 r8, r4, r6, ror #8
 949  3d0:   06eb007d    uxtabeq r0, fp, sp
 950  3d4:   e6fc9075    uxtah   r9, ip, r5
 951  3d8:   a68f387b    sxtb16ge    r3, fp, ror #16
 952  3dc:   86af2076    sxtbhi  r2, r6
 953  3e0:   c6bf3c7d    sxthgt  r3, sp, ror #24
 954  3e4:   e6cfc875    uxtb16  ip, r5, ror #16
 955  3e8:   e6efc875    uxtb    ip, r5, ror #16
 956  3ec:   e6ff8875    uxth    r8, r5, ror #16
 957  3f0:   06bfaf34    reveq   sl, r4
 958  3f4:   a6bf8fbc    rev16ge r8, ip
 959  3f8:   e6ffef37    rbit    lr, r7
 960  3fc:   c6ffdfb7    revshgt sp, r7
 961  400:   e719fe1d    sdiv    r9, sp, lr
 962  404:   e732f61c    udiv    r2, ip, r6
 963  408:   47a20a51    sbfxmi  r0, r1, #20, #3
 964  40c:   e7ee9852    ubfx    r9, r2, #16, #15
 965  410:   87dd1d9b    bfihi   r1, fp, #27, #3
 966  414:   e7d0339f    bfc r3, #7, #10
 967  418:   e8060d6b    stmda   r6, {r0, r1, r3, r5, r6, r8, sl, fp}
 968  41c:   e80438df    stmda   r4, {r0, r1, r2, r3, r4, r6, r7, fp, ip, sp}
 969  420:   e810e1d4    ldmda   r0, {r2, r4, r6, r7, r8, sp, lr, pc}
 970  424:   e83c9873    ldmda   ip!, {r0, r1, r4, r5, r6, fp, ip, pc}
 971  428:   e8a931bd    stmia   r9!, {r0, r2, r3, r4, r5, r7, r8, ip, sp}
 972  42c:   e88b55c3    stm fp, {r0, r1, r6, r7, r8, sl, ip, lr}
 973  430:   e8bcbd22    ldm ip!, {r1, r5, r8, sl, fp, ip, sp, pc}
 974  434:   e8bda10a    pop {r1, r3, r8, sp, pc}
 975  438:   e92b2219    stmdb   fp!, {r0, r3, r4, r9, sp}
 976  43c:   e9298dd5    stmdb   r9!, {r0, r2, r4, r6, r7, r8, sl, fp, pc}
 977  440:   e935605b    ldmdb   r5!, {r0, r1, r3, r4, r6, sp, lr}
 978  444:   e91191a7    ldmdb   r1, {r0, r1, r2, r5, r7, r8, ip, pc}
 979  448:   e9ab6e6a    stmib   fp!, {r1, r3, r5, r6, r9, sl, fp, sp, lr}
 980  44c:   e98b4507    stmib   fp, {r0, r1, r2, r8, sl, lr}
 981  450:   e9b053cc    ldmib   r0!, {r2, r3, r6, r7, r8, r9, ip, lr}
 982  454:   e9912de7    ldmib   r1, {r0, r1, r2, r5, r6, r7, r8, sl, fp, sp}
 983  458:   2a000075    bcs 634 <forth>
 984  45c:   4bfffffe    blmi    45c <near_post+0x184>
 985  460:   612fff10    bxvs    r0
 986  464:   e12fff33    blx r3
 987  468:   eafffffe    b   468 <near_post+0x190>
 988  46c:   ebfffffe    bl  46c <near_post+0x194>
 989  470:   612fff10    bxvs    r0
 990  474:   e12fff35    blx r5
 991  478:   da00006d    ble 634 <forth>
 992  47c:   4bfffffe    blmi    47c <near_post+0x1a4>
 993  480:   112fff19    bxne    r9
 994  484:   e12fff3c    blx ip
 995  488:   eafffedc    b   0 <back>
 996  48c:   8bfffffe    blhi    48c <near_post+0x1b4>
 997  490:   712fff11    bxvc    r1
 998  494:   c12fff37    blxgt   r7
 999  498:   aafffed8    bge 0 <back>
1000  49c:   8bfffffe    blhi    49c <near_post+0x1c4>
1001  4a0:   e12fff1c    bx  ip
1002  4a4:   312fff37    blxcc   r7
1003  4a8:   eafffffe    b   4a8 <near_post+0x1d0>
1004  4ac:   cbfffed3    blgt    0 <back>
1005  4b0:   a12fff11    bxge    r1
1006  4b4:   e12fff30    blx r0
1007  4b8:   eafffffe    b   4b8 <near_post+0x1e0>
1008  4bc:   eb00005c    bl  634 <forth>
1009  4c0:   c12fff1e    bxgt    lr
1010  4c4:   112fff3b    blxne   fp
1011  4c8:   2afffffe    bcs 4c8 <near_post+0x1f0>
1012  4cc:   ebfffffe    bl  4cc <near_post+0x1f4>
1013  4d0:   212fff1a    bxcs    sl
1014  4d4:   e12fff34    blx r4
1015  4d8:   eafffec8    b   0 <back>
1016  4dc:   ebfffffe    bl  4dc <near_post+0x204>
1017  4e0:   312fff1c    bxcc    ip
1018  4e4:   e12fff38    blx r8
1019  4e8:   ea000051    b   634 <forth>
1020  4ec:   ebfffffe    bl  4ec <near_post+0x214>
1021  4f0:   e12fff1a    bx  sl
1022  4f4:   e12fff31    blx r1
1023  4f8:   4e042a06    vmlami.f32  s4, s8, s12
1024  4fc:   ee052a45    vmls.f32    s4, s10, s10
1025  500:   ee151a46    vnmla.f32   s2, s10, s12
1026  504:   be134a04    vnmlslt.f32 s8, s6, s8
1027  508:   4e263a47    vnmulmi.f32 s6, s12, s14
1028  50c:   ee310a00    vadd.f32    s0, s2, s0
1029  510:   ee321a45    vsub.f32    s2, s4, s10
1030  514:   2e810a06    vdivcs.f32  s0, s2, s12
1031  518:   ee030b06    vmla.f64    d0, d3, d6
1032  51c:   ee010b45    vmls.f64    d0, d1, d5
1033  520:   ee141b46    vnmla.f64   d1, d4, d6
1034  524:   1e110b01    vnmlsne.f64 d0, d1, d1
1035  528:   1e253b45    vnmulne.f64 d3, d5, d5
1036  52c:   3e320b04    vaddcc.f64  d0, d2, d4
1037  530:   ee321b44    vsub.f64    d1, d2, d4
1038  534:   4e810b05    vdivmi.f64  d0, d1, d5
1039  538:   eeb03ac3    vabs.f32    s6, s6
1040  53c:   5eb13a44    vnegpl.f32  s6, s8
1041  540:   eeb10ac4    vsqrt.f32   s0, s8
1042  544:   eeb00bc4    vabs.f64    d0, d4
1043  548:   eeb11b44    vneg.f64    d1, d4
1044  54c:   eeb10bc1    vsqrt.f64   d0, d1
1045  550:   5e00ea10    vmovpl  s0, lr
1046  554:   ee14ba10    vmov    fp, s8
1047  558:   bc4ebb11    vmovlt  d1, fp, lr
1048  55c:   ec557b15    vmov    r7, r5, d5
1049  560:   eeb04a46    vmov.f32    s8, s12
1050  564:   8eb01b42    vmovhi.f64  d1, d2
1051  568:   6eb72a00    vmovvs.f32  s4, #112    ; 0x70
1052  56c:   eeb72b00    vmov.f64    d2, #112    ; 0x70
1053  570:   eeb03a00    vmov.f32    s6, #0
1054  574:   eeb01b00    vmov.f64    d1, #0
1055  578:   ed952a1d    vldr    s4, [r5, #116]  ; 0x74
1056  57c:   3d811a0e    vstrcc  s2, [r1, #56]   ; 0x38
1057  580:   1d957b04    vldrne  d7, [r5, #16]
1058  584:   ed816b39    vstr    d6, [r1, #228]  ; 0xe4
1059 
1060 00000588 <near_flt>:
1061  588:   ed9f1a0e    vldr    s2, [pc, #56]   ; 5c8 <near_post_flt>
1062  58c:   ed8f3a0d    vstr    s6, [pc, #52]   ; 5c8 <near_post_flt>
1063  590:   bd1f2b04    vldrlt  d2, [pc, #-16]  ; 588 <near_flt>
1064  594:   cd0f3b02    vstrgt  d3, [pc, #-8]   ; 594 <near_flt+0xc>
1065  598:   3d9f2a0a    vldrcc  s4, [pc, #40]   ; 5c8 <near_post_flt>
1066  59c:   ed8f0a09    vstr    s0, [pc, #36]   ; 5c8 <near_post_flt>
1067  5a0:   cd9f4b08    vldrgt  d4, [pc, #32]   ; 5c8 <near_post_flt>
1068  5a4:   ed0f0b09    vstr    d0, [pc, #-36]  ; 588 <near_flt>
1069  5a8:   ed9f4a06    vldr    s8, [pc, #24]   ; 5c8 <near_post_flt>
1070  5ac:   ed8f3a05    vstr    s6, [pc, #20]   ; 5c8 <near_post_flt>
1071  5b0:   5d1f4b0c    vldrpl  d4, [pc, #-48]  ; 588 <near_flt>
1072  5b4:   ed0f5b0d    vstr    d5, [pc, #-52]  ; 588 <near_flt>
1073  5b8:   9d9f4a02    vldrls  s8, [pc, #8]    ; 5c8 <near_post_flt>
1074  5bc:   3d0f6a02    vstrcc  s12, [pc, #-8]  ; 5bc <near_flt+0x34>
1075  5c0:   ed9f6b00    vldr    d6, [pc]    ; 5c8 <near_post_flt>
1076  5c4:   bd0f1b01    vstrlt  d1, [pc, #-4]   ; 5c8 <near_post_flt>
1077 
1078 000005c8 <near_post_flt>:
1079  5c8:   ec912a01    vldmia  r1, {s4}
1080  5cc:   2ca62a01    vstmiacs    r6!, {s4}
1081  5d0:   ecb91b08    vldmia  r9!, {d1-d4}
1082  5d4:   eca36b04    vstmia  r3!, {d6-d7}
1083  5d8:   6d323a01    vldmdbvs    r2!, {s6}
1084  5dc:   ed267a01    vstmdb  r6!, {s14}
1085  5e0:   ed3d4b08    vldmdb  sp!, {d4-d7}
1086  5e4:   ed205b06    vstmdb  r0!, {d5-d7}
1087  5e8:   eeb41a41    vcmp.f32    s2, s2
1088  5ec:   7eb44ac4    vcmpevc.f32 s8, s8
1089  5f0:   eeb40b46    vcmp.f64    d0, d6
1090  5f4:   aeb43bc7    vcmpege.f64 d3, d7
1091  5f8:   beb51a40    vcmplt.f32  s2, #0.0
1092  5fc:   ceb57ac0    vcmpegt.f32 s14, #0.0
1093  600:   eeb54b40    vcmp.f64    d4, #0.0
1094  604:   eeb51bc0    vcmpe.f64   d1, #0.0
1095  608:   6ebd1ac3    vcvtvs.s32.f32  s2, s6
1096  60c:   cebc3ac7    vcvtgt.u32.f32  s6, s14
1097  610:   3eb80ac1    vcvtcc.f32.s32  s0, s2
1098  614:   3eb81a42    vcvtcc.f32.u32  s2, s4
1099  618:   8ebd2bc4    vcvthi.s32.f64  s4, d4
1100  61c:   8ebc3bc6    vcvthi.u32.f64  s6, d6
1101  620:   9eb73bc7    vcvtls.f32.f64  s6, d7
1102  624:   eeb83bc4    vcvt.f64.s32    d3, s8
1103  628:   0eb85b47    vcvteq.f64.u32  d5, s14
1104  62c:   eeb74ac5    vcvt.f64.f32    d4, s10
1105  630:   e120017a    bkpt    0x001a
1106  */
1107 
1108   static const unsigned int insns[] =
1109   {
1110     0xe082852b,     0x009310c7,     0xe0290284,     0xc0329066,
1111     0xc04c000e,     0x00528364,     0xe069818d,     0x60708864,
1112     0xd08597a1,     0xe09d12c6,     0xc0adb0c7,     0xe0b80329,
1113     0xe0c392e6,     0x80dd1845,     0x30e28486,     0xe0f4a76d,
1114     0x118db785,     0xe19a9764,     0xe1cd90e5,     0xe1d20547,
1115     0xe086d777,     0x809c4776,     0x90265c57,     0xe035841d,
1116     0xe04c2055,     0x20539c17,     0xc06c9614,     0xe072811c,
1117     0xe08c4d1d,     0xe09b8d76,     0x10a20415,     0xe0beb256,
1118     0x80ca835e,     0xe0dc1615,     0x60e54a7e,     0xe0fc181d,
1119     0x61818076,     0xe19db577,     0xe1ce4216,     0xe1dba31d,
1120     0x828d8261,     0xe29ed69b,     0xe226e87d,     0xe2332f49,
1121     0xe24d46d9,     0xb25e1402,     0xe2650325,     0x3274882f,
1122     0xb2849102,     0xe2948902,     0x22aeac2a,     0xe2b6aabd,
1123     0xe2cc2426,     0xe2da85a5,     0xe2e6d871,     0x12fba6e9,
1124     0x638737ff,     0x03952951,     0x63c18eea,     0x33d2020a,
1125     0xe118028d,     0xe13601a7,     0xe15c0164,     0xb1750807,
1126     0xe112073e,     0x31300572,     0x915e0b37,     0x617a0b17,
1127     0xe3120585,     0x433e071b,     0xe355030e,     0x3377010a,
1128     0xe1a00b84,     0xe1b01484,     0xe1a001aa,     0xe1b00a2a,
1129     0xe1a015c9,     0x61b0254b,     0x31a08fe2,     0xe1b0946c,
1130     0xe1a0877e,     0xe1b0c473,     0xc1a0ce1d,     0xe1b0c61d,
1131     0xc1a00931,     0xc1b0bc33,     0xd1a0265c,     0xb1b0165a,
1132     0xe1a0a003,     0xe1b00009,     0x73a03e29,     0xe3b0497e,
1133     0xe1a0c1a6,     0x71b0554d,     0xe1a0137e,     0x01b0897c,
1134     0x330cbf31,     0x33429bf7,     0xd001059d,     0xe0100b9a,
1135     0xe0207c93,     0x0038639b,     0xe084e695,     0xe0940796,
1136     0xe0a08e9b,     0xe0b4b79e,     0x20c51796,     0x40db059c,
1137     0xe0498592,     0x0060ed94,     0x510d9054,     0x4125005c,
1138     0xe1473055,     0xe1649052,     0xe101658c,     0xe1006cca,
1139     0xe108e3a1,     0xe10176ed,     0xe1206483,     0xe12b7ec4,
1140     0xe14a0786,     0x914b3ec4,     0xe14b8ca3,     0xe14185e3,
1141     0x21220dac,     0xe12806ec,     0xa1620e86,     0xe16807cc,
1142     0x016a0ea3,     0xe1600de3,     0xe697a009,     0xe5d900c4,
1143     0xe1b4e0b6,     0xe1f96ed8,     0xe09120f1,     0xe6890004,
1144     0xe5e5305c,     0xe1c82ab0,     0xe79c8008,     0xe4dab010,
1145     0xe19ab0b6,     0xe1bb50da,     0xe19360f7,     0xe7ad7005,
1146     0xe5ca2000,     0xe08460b3,     0xe59ca000,     0xe4db4084,
1147     0xe09990bc,     0xe0d399d4,     0xe1f2b9f4,     0xe78db00b,
1148     0xe7cd100a,     0xe08ea0b9,     0xe7b36004,     0xe7f6400d,
1149     0xe09760ba,     0xe1b600db,     0xe096a0fd,     0xe783700c,
1150     0xe7e83001,     0xe1cc44b0,     0xe51f1008,     0xe55f7008,
1151     0xe15f21b0,     0xe15fa0d8,     0xe1dfe3f8,     0xe51f2008,
1152     0xe55f3008,     0xe1df72bc,     0xe15fd0d8,     0xe15fa2fc,
1153     0xe51f5008,     0xe5dfe01c,     0xe1df51b8,     0xe15f63dc,
1154     0xe15fb4f0,     0xe59f700c,     0xe5df5008,     0xe15fa4bc,
1155     0xe1df60d0,     0xe15f90f8,     0xe690036a,     0xe7d8348e,
1156     0xe78d5783,     0xe6c99145,     0xe7945360,     0xe7d0e4a4,
1157     0xe68c52cc,     0xe7e13667,     0xe7b26063,     0xe7fe8842,
1158     0xe7a363e6,     0xe7c83502,     0xe79db40e,     0xe7fda20c,
1159     0xe789d142,     0xe7eb774e,     0xe19bcf9f,     0xe1dc4f9f,
1160     0xe1fbbf9f,     0xe18e1f97,     0xe1c4cf96,     0x21e74f96,
1161     0xe6143f17,     0xe61a9f33,     0xe615cf56,     0xe615cf7e,
1162     0xe61a0f97,     0x66180ff2,     0x5624bf15,     0x6623bf3c,
1163     0xe6230f55,     0xe61caf75,     0x3626af9e,     0xe62baff7,
1164     0x56349f1e,     0xe63e1f37,     0x363b9f55,     0xa6313f7b,
1165     0xc635df97,     0xe6351ff7,     0xe654af17,     0x26591f37,
1166     0xe65dbf57,     0xe654bf7e,     0x365d2f97,     0xc65a8ffe,
1167     0xe66c3f1d,     0xe66d4f36,     0xe66a1f5e,     0xd66d2f7e,
1168     0xe66c1f95,     0xc6640ffd,     0x867a0f15,     0xd674bf37,
1169     0xa67e1f59,     0xe67b2f7e,     0xa6749f95,     0x867d2ffe,
1170     0xe683a877,     0x26a59c77,     0xe6b53477,     0xe6c48476,
1171     0x06eb007d,     0xe6fc9075,     0xa68f387b,     0x86af2076,
1172     0xc6bf3c7d,     0xe6cfc875,     0xe6efc875,     0xe6ff8875,
1173     0x06bfaf34,     0xa6bf8fbc,     0xe6ffef37,     0xc6ffdfb7,
1174     0xe719fe1d,     0xe732f61c,     0x47a20a51,     0xe7ee9852,
1175     0x87dd1d9b,     0xe7d0339f,     0xe8060d6b,     0xe80438df,
1176     0xe810e1d4,     0xe83c9873,     0xe8a931bd,     0xe88b55c3,
1177     0xe8bcbd22,     0xe8bda10a,     0xe92b2219,     0xe9298dd5,
1178     0xe935605b,     0xe91191a7,     0xe9ab6e6a,     0xe98b4507,
1179     0xe9b053cc,     0xe9912de7,     0x2a000075,     0x4bfffffe,
1180     0x612fff10,     0xe12fff33,     0xeafffffe,     0xebfffffe,
1181     0x612fff10,     0xe12fff35,     0xda00006d,     0x4bfffffe,
1182     0x112fff19,     0xe12fff3c,     0xeafffedc,     0x8bfffffe,
1183     0x712fff11,     0xc12fff37,     0xaafffed8,     0x8bfffffe,
1184     0xe12fff1c,     0x312fff37,     0xeafffffe,     0xcbfffed3,
1185     0xa12fff11,     0xe12fff30,     0xeafffffe,     0xeb00005c,
1186     0xc12fff1e,     0x112fff3b,     0x2afffffe,     0xebfffffe,
1187     0x212fff1a,     0xe12fff34,     0xeafffec8,     0xebfffffe,
1188     0x312fff1c,     0xe12fff38,     0xea000051,     0xebfffffe,
1189     0xe12fff1a,     0xe12fff31,     0x4e042a06,     0xee052a45,
1190     0xee151a46,     0xbe134a04,     0x4e263a47,     0xee310a00,
1191     0xee321a45,     0x2e810a06,     0xee030b06,     0xee010b45,
1192     0xee141b46,     0x1e110b01,     0x1e253b45,     0x3e320b04,
1193     0xee321b44,     0x4e810b05,     0xeeb03ac3,     0x5eb13a44,
1194     0xeeb10ac4,     0xeeb00bc4,     0xeeb11b44,     0xeeb10bc1,
1195     0x5e00ea10,     0xee14ba10,     0xbc4ebb11,     0xec557b15,
1196     0xeeb04a46,     0x8eb01b42,     0x6eb72a00,     0xeeb72b00,
1197     0xeeb03a00,     0xeeb01b00,     0xed952a1d,     0x3d811a0e,
1198     0x1d957b04,     0xed816b39,     0xed9f1a0e,     0xed8f3a0d,
1199     0xbd1f2b04,     0xcd0f3b02,     0x3d9f2a0a,     0xed8f0a09,
1200     0xcd9f4b08,     0xed0f0b09,     0xed9f4a06,     0xed8f3a05,
1201     0x5d1f4b0c,     0xed0f5b0d,     0x9d9f4a02,     0x3d0f6a02,
1202     0xed9f6b00,     0xbd0f1b01,     0xec912a01,     0x2ca62a01,
1203     0xecb91b08,     0xeca36b04,     0x6d323a01,     0xed267a01,
1204     0xed3d4b08,     0xed205b06,     0xeeb41a41,     0x7eb44ac4,
1205     0xeeb40b46,     0xaeb43bc7,     0xbeb51a40,     0xceb57ac0,
1206     0xeeb54b40,     0xeeb51bc0,     0x6ebd1ac3,     0xcebc3ac7,
1207     0x3eb80ac1,     0x3eb81a42,     0x8ebd2bc4,     0x8ebc3bc6,
1208     0x9eb73bc7,     0xeeb83bc4,     0x0eb85b47,     0xeeb74ac5,
1209     0xe120017a,
1210   };
1211 // END  Generated code -- do not edit
1212 
1213   // reset the detected cpu feature set
1214   VM_Version::features(detected_features);
1215 
1216   {
1217     bool ok = true;
1218     unsigned int *insns1 = (unsigned int *)entry;
1219     for (unsigned int i = 0; i < sizeof insns / sizeof insns[0]; i++) {
1220       if (insns[i] != insns1[i]) {
1221         ok = false;
1222         printf("Ours:\n");
1223         Disassembler::decode((address)&insns1[i], (address)&insns1[i+1]);
1224         printf("  Raw: 0x%x\n", insns1[i]);
1225         printf("Theirs:\n");
1226         Disassembler::decode((address)&insns[i], (address)&insns[i+1]);
1227         printf("  Raw: 0x%x\n", insns[i]);
1228         printf("\n");
1229       }
1230     }
1231     assert(ok, "Assembler smoke test failed");
1232   }
1233 #endif // ASSERT
1234 }
1235 
1236 #undef __
1237 void Address::AddressConstruct(Register base, RegisterOrConstant index, enum reg_op op,
1238                                shift_op shift, enum wb_mode mode) {
1239   _base = base;
1240   _wb_mode = mode;
1241   _shift = shift;
1242   _target = 0;
1243   if (index.is_register()) {
1244     _acc_mode = reg;
1245     _index = index.as_register();
1246     _offset = 0;
1247     _as_op = op;
1248   } else {
1249     assert(shift == lsl(), "should be");
1250     assert(index.is_constant(), "should be");
1251     _acc_mode = imm;
1252     // _index = no_reg;
1253     _offset = index.as_constant();
1254     if(SUB == _as_op)
1255       _offset = -_offset;
1256   }
1257 }
1258 
1259 void Address::encode(Instruction_aarch32 *i, CodeSection *sec, address pc) const {
1260   long offset = _offset;
1261   access_mode mode = _acc_mode;
1262 
1263   if(lit == mode) {
1264     //Create the offset from the address
1265     offset = _target - pc;
1266     mode = imm;
1267   }
1268 
1269   //Correct the offset if the base is the PC
1270   if(r15_pc == _base && imm == mode) {
1271     offset -= 8;
1272   }
1273 
1274   int U = (offset >= 0 && _acc_mode == imm) || (_as_op == ADD && _acc_mode == reg);
1275   int P = pre == _wb_mode || off == _wb_mode;
1276   int W = pre == _wb_mode;
1277   i->f(P, 24), i->f(U, 23), i->f(W, 21), i->rf(_base, 16);
1278 
1279   offset = offset < 0 ? -offset : offset;
1280   int opc = i->get(27, 25);
1281 
1282   if (imm == mode) {
1283     switch(opc) {
1284     case 0b010:
1285       // LDR, LDRB
1286       // STR, STRB
1287       i->f(offset, 11, 0);
1288       break;
1289     case 0b000:
1290       // LDRH, LDRSH, LDRSB, LDRD
1291       // STRH, STRD
1292       i->f(1, 22);
1293       assert(offset < (1 << 8), "Offset larger than a byte");
1294       i->f(offset & 0xF, 3, 0);
1295       i->f(offset >> 4, 11, 8);
1296       break;
1297     default:
1298       ShouldNotReachHere();
1299     }
1300   } else if (reg == mode) {
1301     assert(r15_pc->encoding_nocheck() !=
1302             _base->encoding_nocheck(), "Remove this if you have your offsets right");
1303     switch(opc) {
1304     case 0b010:
1305       // LDR, LDRB
1306       // STR, STRB
1307       //Need to set bit 25 as Register 0b011
1308       i->f(1, 25);
1309       i->f(_shift.shift(), 11, 7);
1310       i->f(_shift.kind(), 6, 5);
1311       i->f(0, 4);
1312       i->rf(_index, 0);
1313       break;
1314     case 0b000:
1315       // LDRH, LDRSH, LDRSB, LDRD
1316       // STRH, STRD
1317       //Need to clear bit 22 as Register
1318       i->f(0, 22);
1319       assert(_shift == lsl(), "Type of load/store does not support shift");
1320       i->f(0b0000, 11, 8);
1321       i->rf(_index, 0);
1322       break;
1323     default:
1324       ShouldNotReachHere();
1325     }
1326   } else {
1327     ShouldNotReachHere();
1328   }
1329 
1330   if(lit == _acc_mode) {
1331     sec->relocate(pc, _rspec);
1332   }
1333 }
1334 
1335 void Address::fp_encode(Instruction_aarch32 *i, CodeSection *sec, address pc) const {
1336   // ATM works only for immediate
1337   assert(_wb_mode == off, "Can't do pre or post addressing for vldr, vstr");
1338   long offset = _offset;
1339   if(imm == _acc_mode) {
1340     if(r15_pc == _base) {
1341       //Correct the offset if the base is the PC
1342       offset -= 8;
1343     }
1344     bool U = offset >= 0;
1345     assert(0 == (offset & 3), "Can only access aligned data");
1346     unsigned imm8 = uabs(offset) / 4;
1347     i->f(U, 23), i->rf(_base, 16), i->f(imm8, 7, 0);
1348   } else {
1349     ShouldNotReachHere();
1350   }
1351 }
1352 
1353 #define __ as->
1354 void Address::lea(MacroAssembler *as, Register r) const {
1355   Relocation* reloc = _rspec.reloc();
1356   relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
1357 
1358   //TODO Potentially remove this - added as aarch64 doesn't contain
1359   // any method of handling pre or post
1360   assert( _wb_mode != pre && _wb_mode != post, "Wrong wb mode");
1361   // could probably permit post however
1362   switch(_acc_mode) {
1363   case imm: {
1364     if (_offset == 0 && _base == r) // it's a nop
1365       break;
1366     if (_offset > 0)
1367       __ add(r, _base, _offset);
1368     else
1369       __ sub(r, _base, -_offset);
1370       break;
1371   }
1372   case reg: {
1373     switch (_as_op) {
1374       case ADD:
1375         __ add(r, _base, _index, _shift);
1376         break;
1377       case SUB:
1378         __ sub(r, _base, _index, _shift);
1379         break;
1380     }
1381     break;
1382   }
1383   case lit: {
1384     if (rtype == relocInfo::none)
1385       __ mov(r, target());
1386     else
1387       __ movptr(r, (uint32_t)target());
1388     break;
1389   }
1390   default:
1391     ShouldNotReachHere();
1392   }
1393 }
1394 #undef __
1395 
1396 #define __ as->
1397 class Address;
1398 
1399 // Adapts given Address to the capabilities of instructions respective to the
1400 // provided data type. E.g. some of the instructions cannot use index register
1401 // while others cannot have an offset field.
1402 // Returns a copy of this Address is it's good or constructs a new Address
1403 // good for respective instructions by emitting necessary code to calculate
1404 // the address in tmp register
1405 Address Address::safe_for(InsnDataType type, MacroAssembler *as, Register tmp) {
1406   if (is_safe_for(type))
1407     return *this;
1408   assert(tmp->is_valid(), "must be");
1409   lea(as, tmp);
1410   return Address(tmp);
1411 }
1412 #undef __
1413 
1414 bool Address::is_safe_for(InsnDataType type) {
1415   switch (_acc_mode) {
1416     case imm:
1417     case lit:
1418       return offset_ok_for_immed(_offset, type);
1419     case reg:
1420       return shift_ok_for_index(_shift, type);
1421     case no_mode:
1422     default:
1423       ShouldNotReachHere();
1424       return false;
1425   }
1426 }
1427 
1428 
1429 bool Address::offset_ok_for_immed(long offset, InsnDataType type) {
1430   const int o = offset < 0 ? -offset : offset;
1431   switch (type) {
1432   case IDT_INT:
1433   case IDT_BOOLEAN:
1434   case IDT_OBJECT:
1435   case IDT_ADDRESS:
1436   case IDT_METADATA:
1437   case IDT_ARRAY:
1438     return o <= 0xfff;
1439   case IDT_BYTE:
1440   case IDT_SHORT:
1441   case IDT_LONG:
1442   case IDT_CHAR:
1443     return o <= 0xff;
1444   case IDT_FLOAT:
1445   case IDT_DOUBLE:
1446     return !(o & ~0x3fc);
1447   case IDT_LEA:
1448     return true;
1449   case IDT_ATOMIC:
1450   case IDT_MULTIWORD:
1451     return !o;
1452   default:
1453     ShouldNotReachHere();
1454     return false;
1455   }
1456 }
1457 
1458 bool Address::shift_ok_for_index(shift_op shift, InsnDataType type) {
1459   switch (type) {
1460   case IDT_INT:
1461   case IDT_BOOLEAN:
1462   case IDT_OBJECT:
1463   case IDT_ADDRESS:
1464   case IDT_METADATA:
1465   case IDT_ARRAY:
1466     return !shift.is_register();
1467   case IDT_BYTE:
1468   case IDT_SHORT:
1469   case IDT_LONG:
1470   case IDT_CHAR:
1471     return !shift.is_register() && shift.shift() == 0;
1472   case IDT_LEA:
1473     return true;
1474   case IDT_FLOAT:
1475   case IDT_DOUBLE:
1476   case IDT_ATOMIC:
1477   case IDT_MULTIWORD:
1478     return false;
1479   default:
1480     ShouldNotReachHere();
1481     return false;
1482   }
1483 }
1484 
1485 void Assembler::emit_data64(jlong data,
1486                             relocInfo::relocType rtype,
1487                             int format) {
1488   if (rtype == relocInfo::none) {
1489     emit_int64(data);
1490   } else {
1491     emit_data64(data, Relocation::spec_simple(rtype), format);
1492   }
1493 }
1494 
1495 void Assembler::emit_data64(jlong data,
1496                             RelocationHolder const& rspec,
1497                             int format) {
1498 
1499   assert(inst_mark() != NULL, "must be inside InstructionMark");
1500   // Do not use AbstractAssembler::relocate, which is not intended for
1501   // embedded words.  Instead, relocate to the enclosing instruction.
1502   code_section()->relocate(inst_mark(), rspec, format);
1503   emit_int64(data);
1504 }
1505 
1506 extern "C" {
1507   void das(uint64_t start, int len) {
1508     ResourceMark rm;
1509     len <<= 2;
1510     if (len < 0)
1511       Disassembler::decode((address)start + len, (address)start);
1512     else
1513       Disassembler::decode((address)start, (address)start + len);
1514   }
1515 
1516   JNIEXPORT void das1(unsigned long insn) {
1517     das(insn, 1);
1518   }
1519 }
1520 
1521 #define starti Instruction_aarch32 do_not_use(this); set_current(&do_not_use)
1522 
1523   void Assembler::adr(Register Rd, address adr, Condition cond) {
1524     int offset = adr - pc() - 8;
1525     adr_encode(Rd, offset, cond);
1526   }
1527 
1528 #undef starti
1529 
1530 Address::Address(address target, relocInfo::relocType rtype)
1531   : _acc_mode(lit), _base(sp), _offset(0), _wb_mode(off) {
1532   //TODO we don't complete _wb_mode - what about Addresses that are pre/post accessed?
1533   _is_lval = false;
1534   _target = target;
1535   switch (rtype) {
1536   case relocInfo::oop_type:
1537   case relocInfo::metadata_type:
1538     // Oops are a special case. Normally they would be their own section
1539     // but in cases like icBuffer they are literals in the code stream that
1540     // we don't have a section for. We use none so that we get a literal address
1541     // which is always patchable.
1542     break;
1543   case relocInfo::external_word_type:
1544     _rspec = external_word_Relocation::spec(target);
1545     break;
1546   case relocInfo::internal_word_type:
1547     _rspec = internal_word_Relocation::spec(target);
1548     break;
1549   case relocInfo::opt_virtual_call_type:
1550     _rspec = opt_virtual_call_Relocation::spec();
1551     break;
1552   case relocInfo::static_call_type:
1553     _rspec = static_call_Relocation::spec();
1554     break;
1555   case relocInfo::runtime_call_type:
1556     _rspec = runtime_call_Relocation::spec();
1557     break;
1558   case relocInfo::poll_type:
1559   case relocInfo::poll_return_type:
1560     _rspec = Relocation::spec_simple(rtype);
1561     break;
1562   case relocInfo::none:
1563     _rspec = RelocationHolder::none;
1564     break;
1565   default:
1566     ShouldNotReachHere();
1567     break;
1568   }
1569 }
1570 
1571 #ifdef COMPILER2
1572 Address Address::make_raw(int base, int index, int scale, unsigned long o, relocInfo::relocType disp_reloc)  {
1573   RelocationHolder rspec;
1574   if (disp_reloc != relocInfo::none) {
1575     rspec = Relocation::spec_simple(disp_reloc);
1576   }
1577   if (as_Register(index) == r15_pc) {
1578     assert(scale == 0, "unsupported");
1579     Address a(as_Register(base), o);
1580     a._rspec = rspec;
1581     return a;
1582   } else {
1583     assert(o == 0, "unsupported");
1584     Address a(as_Register(base), as_Register(index), lsl(scale));
1585     a._rspec = rspec;
1586     return a;
1587   }
1588 }
1589 #endif
1590 
1591 void Assembler::adr(Register r, const Address &dest, Condition cond) {
1592   code_section()->relocate(pc(), dest.rspec());
1593   adr(r, dest.target());
1594 }
1595 
1596 void Assembler::wrap_label(Label &L, Assembler::uncond_branch_insn insn) {
1597   if (L.is_bound()) {
1598     (this->*insn)(target(L));
1599   } else {
1600     L.add_patch_at(code(), locator());
1601     (this->*insn)(pc());
1602   }
1603 }
1604 void Assembler::wrap_label(Label &L, Condition cond,
1605                            Assembler::cond_branch_insn insn) {
1606   if (L.is_bound()) {
1607     (this->*insn)(target(L), cond);
1608   } else {
1609     L.add_patch_at(code(), locator());
1610     (this->*insn)(pc(), cond);
1611   }
1612 }
1613 
1614 void Assembler::wrap_label(Register r, Label &L, Condition cond,
1615                            Assembler::cond_ldst_insn insn) {
1616   if (L.is_bound()) {
1617     (this->*insn)(r, target(L), cond);
1618   } else {
1619     L.add_patch_at(code(), locator());
1620     (this->*insn)(r, pc(), cond);
1621   }
1622 }
1623 
1624 
1625 void Assembler::wrap_label(FloatRegister r, Label &L, Condition cond,
1626                            Assembler::cond_fp_ldst_insn insn) {
1627   if (L.is_bound()) {
1628     (this->*insn)(r, target(L), cond);
1629   } else {
1630     L.add_patch_at(code(), locator());
1631     (this->*insn)(r, pc(), cond);
1632   }
1633 }
1634 
1635 
1636 uint32_t Assembler::encode_imm12(int imm) {
1637   assert(is_valid_for_imm12(imm),
1638          "only valid immediates allowed, call is_valid_for_imm12 first");
1639   uint32_t n = imm;
1640   if ((n & 0xFFFFFF00) == 0) {
1641     return n;
1642   }
1643   if ((n & 0xFC000000) == 0) {
1644     const int lshift = __builtin_ctz(n) & 0xFFFFFFFE;
1645     return ((32 - lshift) << 7) | (n >> lshift);
1646   }
1647   n = (n << 16) | (n >> 16);
1648   const int lshift = __builtin_ctz(n) & 0xFFFFFFFE;
1649   return ((16 - lshift) << 7) | (n >> lshift);
1650 }
1651 
1652 int Assembler::decode_imm12(uint32_t imm12) {
1653   assert((imm12 & 0xFFFFF000) == 0, "bad imm12");
1654   uint32_t shift = (imm12 & 0x00000F00) >> 7;
1655   uint32_t value = imm12 & 0x000000FF;
1656   return (int) ((value >> shift) | (value << (32 - shift)));
1657 }
1658 
1659 bool Assembler::is_valid_for_imm12(int imm) {
1660   uint32_t n = (uint32_t) imm;
1661   uint32_t shift = __builtin_clz(n) & 0xFFFFFFFE;
1662   uint32_t result = n << shift;
1663   if ((result & 0x00FFFFFF) == 0) {
1664     return true;
1665   }
1666   n = (n << 16) | (n >> 16);
1667   shift = __builtin_clz(n) & 0xFFFFFFFE;
1668   result = n << shift;
1669   if ((result & 0x00FFFFFF) == 0) {
1670     return true;
1671   }
1672   return false;
1673 }
1674 
1675 bool Assembler::operand_valid_for_logical_immediate(bool is32, uint64_t imm) {
1676   return is32 && is_valid_for_imm12(imm);
1677 }
1678 
1679 bool Assembler::operand_valid_for_add_sub_immediate(int imm) {
1680   return is_valid_for_imm12(imm);
1681 }
1682 
1683 bool Assembler::operand_valid_for_add_sub_immediate(unsigned long imm) {
1684   return is_valid_for_imm12(imm);
1685 }
1686 
1687 bool Assembler::operand_valid_for_add_sub_immediate(unsigned imm) {
1688   return is_valid_for_imm12(imm);
1689 }
1690 
1691 bool Assembler::operand_valid_for_add_sub_immediate(jlong imm) {
1692   return is_valid_for_imm12(imm >> 32) && is_valid_for_imm12(imm);
1693 }
1694 
1695 // n.b. this is implemented in subclass MacroAssembler
1696 void Assembler::bang_stack_with_offset(int offset) { Unimplemented(); }
1697 
1698 int AbstractAssembler::code_fill_byte() {
1699   return 0;
1700 }
1701 
1702 void Assembler::mov_immediate(Register dst, uint32_t imm32, Condition cond, bool s) {
1703 #ifndef PRODUCT
1704     {
1705       char buffer[64];
1706       snprintf(buffer, sizeof(buffer), "0x%"PRIX32, imm32);
1707       block_comment(buffer);
1708     }
1709 #endif
1710   if(is_valid_for_imm12(imm32)) {
1711     if(s) movs_i(dst, (unsigned)imm32, cond);
1712     else  mov_i (dst, (unsigned)imm32, cond);
1713   } else if(is_valid_for_imm12(~imm32)) {
1714     if(s) mvns_i(dst, (unsigned)~imm32, cond);
1715     else  mvn_i (dst, (unsigned)~imm32, cond);
1716   } else if (!s && VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2) &&
1717              (imm32 < (1 << 16))) {
1718     movw_i(dst, (unsigned)imm32, cond);
1719   } else if (!s && VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2) &&
1720              !(imm32 & ((1 << 16) - 1))) {
1721     movw_i(dst, (unsigned)0, cond);
1722     movt_i(dst, (unsigned)(imm32 >> 16), cond);
1723   } else { // TODO Could expand to varied numbers of mov and orrs
1724     //Need to do a full 32 bits
1725     mov_immediate32(dst, imm32, cond, s);
1726   }
1727 }
1728 
1729 //This should really be in the macroassembler
1730 void Assembler::mov_immediate32(Register dst, uint32_t imm32, Condition cond, bool s)
1731 {
1732     // Need to move a full 32 bit immediate, for example if we're loading an address that
1733     // might change later and therefore need to be updated.
1734   if (VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2))  {
1735     //Use a movw and a movt
1736     Assembler::movw_i(dst, (unsigned)(imm32 & 0xffff), cond);
1737     Assembler::movt_i(dst, (unsigned)(imm32 >> 16), cond);
1738     if(s) {
1739       //Additionally emit a cmp instruction
1740       Assembler::cmp(dst, 0);
1741     }
1742   } else {
1743     // Sadly we don't have movw, movt
1744     // instead emit a mov and three orr
1745     mov_i(dst,    imm32 & (0xff      ), cond);
1746     orr(dst, dst, imm32 & (0xff << 8 ), cond);
1747     orr(dst, dst, imm32 & (0xff << 16), cond);
1748     if(s) orrs(dst, dst, imm32 & (0xff << 24), cond);
1749     else  orr (dst, dst, imm32 & (0xff << 24), cond);
1750   }
1751 }
1752 
1753 #define starti Instruction_aarch32 do_not_use(this); set_current(&do_not_use)
1754 void Assembler::add_sub_imm(int decode, Register Rd, Register Rn, int imm,
1755                             Condition cond, bool s) {
1756   int cpart = 0;
1757   switch(decode) {
1758     case 0b0100: cpart = 0b0010; break; // ADD  ->  SUB
1759     case 0b0010:                        // SUB  ->  ADD
1760     case 0b0011: cpart = 0b0100; break; // RSB  ->  ADD
1761     case 0b0101: cpart = 0b0110; break; // ADC  ->  SUBC
1762     case 0b0110:                        // SUBC ->  ADC
1763     case 0b0111: cpart = 0b0101; break; // RSC  ->  ADC
1764     default: ShouldNotReachHere();
1765   }
1766   //try both possible imm_instrs
1767   if(imm_instr(decode, Rd, Rn, imm, cond, s)) return;
1768   if(imm_instr(cpart, Rd, Rn, -imm, cond, s)) return;
1769 
1770   //Try plan B - a mov first - need to have destination that is not an arg
1771   assert(Rd != Rn, "Can't use imm and can't do a mov. I'm in a jam.");
1772   mov_immediate(Rd, (uint32_t)uabs(imm), cond, s);
1773   //Now do the non immediate version - copied from the immediate encodings
1774   {
1775     starti;
1776     reg_instr( imm < 0 ? cpart : decode, lsl(), cond, s);
1777     rf(Rn, 16), rf(Rd, 12), rf(Rd, 0);
1778   }
1779 }
1780 
1781 bool Assembler::can_ldst_multiple( unsigned regset, const Address& adr) {
1782   int nbits = count_bits(regset);
1783   return adr.get_mode() == Address::imm &&
1784          !(adr.base()->bit() & regset) && // FIXME, this could be relaxed
1785          (((adr.offset() == 0 || adr.offset() == wordSize || adr.offset() == -nbits * wordSize) &&
1786            (adr.get_wb_mode() == Address::pre || adr.get_wb_mode() == Address::off)) ||
1787           ((adr.offset() == 0 || adr.offset() == -wordSize || adr.offset() == nbits * wordSize) &&
1788            adr.get_wb_mode() == Address::post));
1789 }
1790 
1791 unsigned Assembler::count_bits(unsigned val) {
1792   unsigned i, count;
1793   for(i = 0, count = 0; i < 8 * sizeof(val); val >>= 1, i++)
1794     if( val & 1 ) count++;
1795   return count;
1796 }
1797 
1798 
1799 void Assembler::vmov_imm(FloatRegister Rd, unsigned imm, bool is64bit,
1800                          Condition cond) {
1801   starti;
1802   fp_instr_base(is64bit, cond);
1803   f(0b1011, 23, 20);
1804   // double register passed (see 'd0'-'dN' encoding), not reencode it's number
1805   fp_rencode(Rd, false, 12, 22);
1806   f(0b0000, 7, 4);
1807   f(imm & 0xf, 3, 0);
1808   f(imm >> 4, 19, 16);
1809 }
1810 
1811 void Assembler::vmov_imm(FloatRegister Rd, unsigned imm) {
1812   assert(operand_valid_for_double_immediate(0), "operand should be valid for immediate");
1813   int cmod = 0b0000;
1814   {
1815     starti;
1816     f(0b1111001, 31, 25);
1817     f(0, 24); // imm1
1818     f(0b10000, 23, 19);
1819     // double register passed (see 'd0'-'dN' encoding), not reencode it's number
1820     fp_rencode(Rd, false, 12, 22);
1821     f(cmod, 11, 8);
1822     f(0b000, 7, 5);
1823     f(1, 4);
1824     f(imm & 0xf, 3, 0);  //imm4
1825     f(imm >> 4, 18, 16); //imm3
1826   }
1827 }
1828 
1829 void Assembler::vmov_imm_zero(FloatRegister Rd, bool is64bit,
1830                               Condition cond) {
1831   // Note that this is not a floating point vmov but instead
1832   // an integer vmov from the SIMD instructions.
1833   // cannot be conditional.
1834   assert(operand_valid_for_double_immediate(0), "operand should be valid for immediate");
1835   assert(is64bit, "SIMD loading available only for double registers");
1836   assert(cond == C_DFLT, "Unable to vmov #0 conditionally");
1837   //int cmod = is64bit? 0b1110 : 0b0000; // ? I64 : I32
1838   int cmod = 0b1110;
1839   {
1840     starti;
1841     f(0b1111001, 31, 25);
1842     f(0, 24); // imm1
1843     f(0b10000, 23, 19);
1844     // double register passed (see 'd0'-'dN' encoding), not reencode it's number
1845     fp_rencode(Rd, false, 12, 22);
1846     f(0b000, 18, 16); //imm3
1847     f(cmod, 11, 8);
1848     f(0b00, 7, 6);
1849     f(is64bit, 5);
1850     f(1, 4);
1851     f(0b0000, 3, 0); //imm4
1852   }
1853 }
1854 
1855 bool Assembler::operand_valid_for_float_immediate(float v) {
1856     if (!(VM_Version::features() & FT_VFPV3)) {
1857         return false;
1858     }
1859     union ufloat {
1860         float f;
1861         uint32_t u;
1862     } imm;
1863     unsigned tmp;
1864     imm.f = v;
1865 
1866     if (imm.u & ((1 << 19) - 1))
1867         return false;
1868 
1869     tmp = (imm.u >> 25) & ((1 << 6) - 1);
1870     return tmp == 32 || tmp == 31;
1871 }
1872 
1873 bool Assembler::operand_valid_for_double_immediate(double v) {
1874     if (!(VM_Version::features() & FT_VFPV3)) {
1875         return false;
1876     }
1877     union ufloat {
1878         double f;
1879         uint64_t u;
1880     } imm;
1881     unsigned tmp;
1882     imm.f = v;
1883 
1884     if ((VM_Version::features() & FT_AdvSIMD) && imm.u == 0)
1885         return true;
1886 
1887     if (imm.u & (uint64_t) 0xffffffffffffLL)
1888         return false;
1889 
1890     imm.u >>= 48;
1891 
1892     tmp = (imm.u >> 6) & ((1 << 9) - 1);
1893     return tmp == 0x100 || tmp == 0xff;
1894 }
1895 
1896 unsigned Assembler::encode_float_fp_imm(float imm_f) {
1897   assert(operand_valid_for_float_immediate(imm_f), "operand should be valid for immediate");
1898   union ufloat {
1899     float f;
1900     uint32_t u;
1901   } imm;
1902   unsigned tmp, imm8;
1903   imm.f = imm_f;
1904 
1905   assert(!(imm.u & ((1 << 19) - 1)), "Invalid float imm");
1906   tmp = (imm.u >> 25) & ((1 << 6) - 1);
1907   assert(tmp == 32 || tmp == 31, "Invalid float imm");
1908 
1909   imm8 = (imm.u >> 24) & 0x80; // set a
1910   imm8 |= (imm.u >> 19) & 0x7F; // set bcdefgh
1911   return imm8;
1912 }
1913 
1914 unsigned Assembler::encode_double_fp_imm(double imm_f) {
1915   assert(operand_valid_for_double_immediate(imm_f), "operand should be valid for immediate");
1916   union ufloat {
1917     double f;
1918     uint64_t u;
1919   } imm;
1920   unsigned tmp, imm8;
1921   imm.f = imm_f;
1922 
1923   assert(!(imm.u & (uint64_t)0xffffffffffffLL), "Invalid float imm");
1924   imm.u >>= 48;
1925 
1926   tmp = (imm.u >> 6) & ((1 << 9) - 1);
1927   assert(tmp == 0x100 || tmp == 0xff, "Invalid float imm");
1928 
1929   imm8 = (imm.u >> 8) & 0x80; // set a
1930   imm8 |= imm.u & 0x7F; // set bcdefgh
1931   return imm8;
1932 }
1933 
1934 
1935 void Assembler::fp_ldst_instr(int decode, bool is64bit, const Address& adr,
1936                               Condition cond) {
1937   f(cond, 31, 28), f(0b110, 27, 25), f(decode, 24, 20);
1938   f(0b101, 11, 9), f(is64bit, 8);
1939   adr.fp_encode(current, code_section(), pc());
1940 }
1941 
1942 void Assembler::fp_ldst_mul(Register Rn, uint32_t regset, bool load, bool is64bit,
1943                             enum fp_mode mode, Condition cond) {
1944   starti;
1945   bool P = db_wb == mode;
1946   bool U = ia_wb == mode || ia == mode;
1947   bool W = ia_wb == mode || db_wb == mode;
1948   // Encode registers
1949   unsigned i, fp_first_reg, nregs = 1;
1950   bool enc_z = false;
1951   for(fp_first_reg = 0; !(regset & 1); regset >>= 1, fp_first_reg++);
1952   FloatRegister Rd = (FloatRegister) fp_first_reg;
1953   for(i = 0; i + fp_first_reg < 8 * sizeof(int); i++) {
1954     regset >>= 1;
1955     if(regset & 1) {
1956       assert(!enc_z, "Unable to encode non-consecutive registers in fp_ldst_mul");
1957       nregs++;
1958     } else {
1959       enc_z = true;
1960     }
1961   }
1962   assert(!is64bit || nregs <= 16, "Too many registers in a set");
1963   f(cond, 31, 28), f(0b110, 27, 25); f(P, 24), f(U, 23), f(W, 21), f(load, 20);
1964   // vstm/vstm uses double register number, not it's encoding. Should reencode it.
1965   rf(Rn, 16), fp_rencode(Rd, is64bit, 12, 22), f(0b101, 11, 9), f(is64bit, 8);
1966   f(is64bit ? nregs * 2 : nregs, 7, 0);
1967 }
1968 
1969 void Assembler::simd_ldst(FloatRegister Rd, unsigned type, unsigned size, unsigned num_regs,
1970         const Address &addr, enum SIMD_Align align, unsigned encode) {
1971   starti;
1972   assert(addr.get_mode() == Address::imm &&
1973           (addr.get_wb_mode() == Address::off && addr.offset() == 0) ||
1974           (addr.get_wb_mode() == Address::post && addr.offset() == long(8*num_regs)), "Unsupported");
1975   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
1976   f(0b1111, 31, 28), f(0b0100, 27, 24), f(0, 23), f(encode, 21, 20);
1977   rf(addr.base(), 16), fp_rencode(Rd, false, 12, 22), f(type, 11, 8), f(size, 7, 6);
1978   f((unsigned)align, 5, 4), f(addr.get_wb_mode() == Address::post ? 0b1101 : 0b1111, 3, 0);
1979 }
1980 
1981 void Assembler::simd_ldst_single(FloatRegister Rd, unsigned size, unsigned index,
1982         const Address &addr, bool align, unsigned encode) {
1983   starti;
1984   assert(addr.get_mode() == Address::imm &&
1985           (addr.get_wb_mode() == Address::off && addr.offset() == 0) ||
1986           (addr.get_wb_mode() == Address::post && addr.offset() == long(1<<size)), "Unsupported");
1987   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
1988   f(0b1111, 31, 28), f(0b0100, 27, 24), f(1, 23), f(encode, 21, 20);
1989   rf(addr.base(), 16), fp_rencode(Rd, false, 12, 22), f(0b00, 9, 8), f(size, 11, 10);
1990   f((index<<(size+1))|(size&&align?1:0), 7, 4), f(addr.get_wb_mode() == Address::post ? 0b1101 : 0b1111, 3, 0);
1991 }
1992 
1993 void Assembler::simd_vmov(FloatRegister Dd, unsigned index, Register Rt, bool advsimd,
1994           unsigned index_bits, unsigned bit20, unsigned opc, Condition cond) {
1995   starti;
1996   assert(index < (1u<<index_bits), "Illegal element index");
1997   assert(!advsimd || (VM_Version::features() & FT_AdvSIMD), "SIMD coprocessor required");
1998   opc |= index << (3 - index_bits);
1999   f(cond, 31, 28), f(0b1110, 27, 24), f((opc>>2)&3, 22, 21), f(bit20, 20);
2000   fp_rencode(Dd, false, 16, 7), f(opc>>4, 23);
2001   rf(Rt, 12), f(0b1011, 11, 8), f(opc & 3, 6, 5), f(0b10000, 4, 0);
2002 }
2003 
2004 void Assembler::simd_logicalop(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm, unsigned q,
2005         unsigned a, unsigned b, unsigned u, unsigned c) {
2006   starti;
2007   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2008   assert(!q || ((Dd->encoding() & 2) == 0 && (Dm->encoding() & 2) == 0), "Odd registers");
2009   f(0b1111001, 31, 25), f(u, 24), f(0, 23), f(c, 21, 20), fp_rencode(Dd, false, 12, 22);
2010   fp_rencode(Dn, false, 16, 7), f(a, 11, 8), fp_rencode(Dm, false, 0, 5), f(q, 6), f(b, 4);
2011 }
2012 
2013 void Assembler::simd_vmul(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm,
2014           unsigned bit24, unsigned bits109, unsigned size, unsigned mul, unsigned bit6) {
2015   starti;
2016   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2017   f(0b1111001, 31, 25), f(bit24, 24), f(size, 21, 20), fp_rencode(Dd, false, 12, 22);
2018   f(mul^1, 23), fp_rencode(Dn, false, 16, 7), f(1, 11), f(bits109, 10, 9);
2019   f(mul, 8), f(bit6, 6), f(mul, 4), fp_rencode(Dm, false, 0, 5);
2020 }
2021 
2022 void Assembler::simd_vuzp(FloatRegister Dd, FloatRegister Dm, unsigned size, unsigned q) {
2023   starti;
2024   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2025   assert(!q || ((Dd->encoding() & 2) == 0 && (Dm->encoding() & 2) == 0), "Odd registers");
2026   f(0b111100111, 31, 23), fp_rencode(Dd, false, 12, 22), f(0b11, 21, 20), f(size, 19, 18);
2027   f(0b10, 17, 16), f(0b00010, 11, 7), f(q, 6), f(0, 4), fp_rencode(Dm, false, 0, 5);
2028 }
2029 
2030 void Assembler::simd_vshl(FloatRegister Dd, FloatRegister Dm, unsigned imm,
2031         unsigned q, unsigned u, unsigned encode) {
2032   starti;
2033   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2034   assert(!q || ((Dd->encoding() & 2) == 0 && (Dm->encoding() & 2) == 0), "Odd registers");
2035   f(0b1111001, 31, 25), f(u, 24), f(1, 23), fp_rencode(Dd, false, 12, 22);
2036   f(imm & 0b111111, 21, 16), f(imm >> 6, 7), f(q, 6);
2037   f(encode, 11, 8), fp_rencode(Dm, false, 0, 5), f(1, 4);
2038 }
2039 
2040 void Assembler::simd_vshl(FloatRegister Dd, FloatRegister Dm, FloatRegister Dn, unsigned size,
2041         unsigned q, unsigned u) {
2042   starti;
2043   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2044   assert(!q || ((Dd->encoding() & 2) == 0 && (Dm->encoding() & 2) == 0), "Odd registers");
2045   f(0b1111001, 31, 25), f(u, 24), f(0b0, 23), f(size, 21, 20), fp_rencode(Dn, false, 16, 7);
2046   fp_rencode(Dd, false, 12, 22), f(0b0100, 11, 8), f(q, 6), fp_rencode(Dm, false, 0, 5), f(0, 4);
2047 }
2048 
2049 // Two registers miscellaneous
2050 void Assembler::simd_insn(FloatRegister Dd, FloatRegister Dm, unsigned q, unsigned a,
2051           unsigned b, unsigned size) {
2052   starti;
2053   assert(!q || ((Dd->encoding() & 2) == 0 && (Dm->encoding() & 2) == 0), "Odd registers");
2054   f(0b111100111, 31, 23), fp_rencode(Dd, false, 12, 22), f(0b11, 21, 20);
2055   f(size, 19, 18), f(a, 17, 16), f(0b0, 11), f(b, 10, 6);
2056   fp_rencode(Dm, false, 0, 5), f(0, 4);
2057 }
2058 
2059 void Assembler::simd_cnt(FloatRegister Dd, FloatRegister Dm, unsigned q) {
2060   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2061   starti;
2062   assert(!q || ((Dd->encoding() & 2) == 0 && (Dm->encoding() & 2) == 0), "Odd registers");
2063   f(0b111100111, 31, 23), fp_rencode(Dd, false, 12, 22), f(0b110000, 21, 16);
2064   f(0b01010, 11, 7), f(q, 6), fp_rencode(Dm, false, 0, 5), f(0b0, 4);
2065 }
2066 
2067 void Assembler::simd_padl(FloatRegister Dd, FloatRegister Dm, unsigned q, unsigned size,
2068         unsigned op, unsigned encode) {
2069   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2070   starti;
2071   assert(!q || ((Dd->encoding() & 2) == 0 && (Dm->encoding() & 2) == 0), "Odd registers");
2072   assert(size != 0b11, "unsupported");
2073   f(0b111100111, 31, 23), f(0b11, 21, 20), f(0b00, 17, 16), f(0b0, 11);
2074   fp_rencode(Dd, false, 12, 22), f(0b0, 4), fp_rencode(Dm, false, 0, 5);
2075   f(size, 19, 18), f(op, 7), f(q, 6), f(encode, 10, 8);
2076 }
2077 
2078 void Assembler::simd_dup(FloatRegister Dd, Register Rt, unsigned q, unsigned size) {
2079   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2080   assert(size != 0b11, "must be");
2081   assert(!q || (Dd->encoding() & 2) == 0, "Odd register");
2082   starti;
2083   f(0b111011101, 31, 23), f(size >> 1, 22), f(q, 21), f(0, 20), fp_rencode(Dd, false, 16, 7);
2084   rf(Rt, 12), f(0b1011, 11, 8), f(size & 1, 6, 5), f(0b10000, 4, 0);
2085 }
2086 
2087 void Assembler::simd_dup(FloatRegister Dd, FloatRegister Dm, unsigned index, unsigned q, unsigned size) {
2088   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2089   assert(size != 0b11, "must be");
2090   assert(!q || ((Dd->encoding() & 2) == 0 && (Dm->encoding() & 2) == 0), "Odd registers");
2091   starti;
2092   f(0b111100111, 31, 23), fp_rencode(Dd, false, 12, 22), f(0b11, 21, 20);
2093   f(((index<<1)|1)<<(2-size), 19, 16), f(0b11000, 11, 7), f(q, 6), fp_rencode(Dm, false, 0, 5), f(0b0, 4);
2094 }
2095 
2096 void Assembler::simd_neg(FloatRegister Dd, FloatRegister Dm, unsigned q, unsigned size) {
2097   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2098   assert(size != 0b11, "must be");
2099   assert(!q || ((Dd->encoding() & 2) == 0 && (Dm->encoding() & 2) == 0), "Odd registers");
2100   starti;
2101   f(0b111100111, 31, 23), fp_rencode(Dd, false, 12, 22), f(0b11, 21, 20), f(size, 19, 18);
2102   f(0b01, 17, 16), f(0b00111, 11, 7), f(q, 6), fp_rencode(Dm, false, 0, 5), f(0b0, 4);
2103 }
2104 
2105 void Assembler::simd_vmov(FloatRegister Dd, unsigned imm, unsigned q, unsigned op_cmode) {
2106   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2107   assert(!q || (Dd->encoding() & 2) == 0, "Odd register");
2108   assert(!(imm >> 8), "must be imm8");
2109   starti;
2110   f(0b1111001, 31, 25), f(imm>>7, 24), f(0b1, 23), fp_rencode(Dd, false, 12, 22);
2111   f(0b000, 21, 19), f((imm>>4)&0x7, 18, 16), f(op_cmode&0xf, 11, 8), f(0b0, 7);
2112   f(q, 6); f(op_cmode>>4, 5), f(0b1, 4), f(imm&0xf, 3, 0);
2113 }
2114 
2115 void Assembler::simd_insn(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm,
2116         unsigned q, unsigned a, unsigned b, unsigned u, unsigned c) {
2117   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2118   assert(!q || (Dd->encoding() & 2) == 0, "Odd register");
2119   starti;
2120   f(0b1111001, 31, 25), f(u, 24), f(0b0, 23), f(c, 21, 20), f(a, 11, 8), f(b, 4), f(q, 6);
2121   fp_rencode(Dn, false, 16, 7), fp_rencode(Dd, false, 12, 22), fp_rencode(Dm, false, 0, 5);
2122 }
2123 
2124 void Assembler::simd_mvn(FloatRegister Dd, FloatRegister Dm, unsigned q) {
2125   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2126   starti;
2127   f(0b111100111, 31, 23), fp_rencode(Dd, false, 12, 22), f(0b11, 21, 20), f(0b00, 19, 18);
2128   f(0b00, 17, 16), f(0b01011, 11, 7), f(q, 6), fp_rencode(Dm, false, 0, 5), f(0b0, 4);
2129 }
2130 
2131 void Assembler::simd_insn(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm,
2132         unsigned qn, unsigned a, unsigned b, unsigned u) {
2133   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2134   assert((Dd->encoding() & 2) == 0, "Odd register");
2135   assert(!qn || (Dn->encoding() & 2) == 0, "Odd operand register");
2136   starti;
2137   f(0b1111001, 31, 25), f(u, 24), f(0b1, 23), f(b, 21, 20), f(a, 11, 8), f(0b0, 4), f(0b0, 6);
2138   fp_rencode(Dn, false, 16, 7), fp_rencode(Dd, false, 12, 22), fp_rencode(Dm, false, 0, 5);
2139 }
2140 
2141 void Assembler::simd_vext(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm, unsigned q, unsigned imm) {
2142   assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
2143   assert(!q || ((Dd->encoding() & 2) == 0 && (Dn->encoding() & 2) == 0 && (Dm->encoding() & 2) == 0), "Odd register");
2144   starti;
2145   f(0b111100101, 31, 23), f(0b11, 21, 20), f(imm, 11, 8), f(q, 6), f(0b0, 4);
2146   fp_rencode(Dn, false, 16, 7), fp_rencode(Dd, false, 12, 22), fp_rencode(Dm, false, 0, 5);
2147 }
2148 
2149 #undef starti