//
// Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//

// ARM Architecture Description File

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding, vm name );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
// FIXME: above comment seems wrong.  Spill done through MachSpillCopyNode
//
// The encoding number is the actual bit-pattern placed into the opcodes.
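//
// For example, R_R19 below is declared (SOC, SOE, ...): the register
// allocator treats R19 as save-on-call in compiled Java code, while the
// C calling convention (AAPCS64) treats x19 as callee-saved, i.e.
// save-on-entry.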


// ----------------------------
// Integer/Long Registers
// ----------------------------

// TODO: would be nice to keep track of high-word state:
// zeroRegI --> RegL
// signedRegI --> RegL
// junkRegI --> RegL
// how to tell C2 to treat RegI as RegL, or RegL as RegI?
reg_def R_R0  (SOC, SOC, Op_RegI,   0, R0->as_VMReg());
reg_def R_R0x (SOC, SOC, Op_RegI, 255, R0->as_VMReg()->next());
reg_def R_R1  (SOC, SOC, Op_RegI,   1, R1->as_VMReg());
reg_def R_R1x (SOC, SOC, Op_RegI, 255, R1->as_VMReg()->next());
reg_def R_R2  (SOC, SOC, Op_RegI,   2, R2->as_VMReg());
reg_def R_R2x (SOC, SOC, Op_RegI, 255, R2->as_VMReg()->next());
reg_def R_R3  (SOC, SOC, Op_RegI,   3, R3->as_VMReg());
reg_def R_R3x (SOC, SOC, Op_RegI, 255, R3->as_VMReg()->next());
reg_def R_R4  (SOC, SOC, Op_RegI,   4, R4->as_VMReg());
reg_def R_R4x (SOC, SOC, Op_RegI, 255, R4->as_VMReg()->next());
reg_def R_R5  (SOC, SOC, Op_RegI,   5, R5->as_VMReg());
reg_def R_R5x (SOC, SOC, Op_RegI, 255, R5->as_VMReg()->next());
reg_def R_R6  (SOC, SOC, Op_RegI,   6, R6->as_VMReg());
reg_def R_R6x (SOC, SOC, Op_RegI, 255, R6->as_VMReg()->next());
reg_def R_R7  (SOC, SOC, Op_RegI,   7, R7->as_VMReg());
reg_def R_R7x (SOC, SOC, Op_RegI, 255, R7->as_VMReg()->next());

reg_def R_R8  (SOC, SOC, Op_RegI,   8, R8->as_VMReg());
reg_def R_R8x (SOC, SOC, Op_RegI, 255, R8->as_VMReg()->next());
reg_def R_R9  (SOC, SOC, Op_RegI,   9, R9->as_VMReg());
reg_def R_R9x (SOC, SOC, Op_RegI, 255, R9->as_VMReg()->next());
reg_def R_R10 (SOC, SOC, Op_RegI,  10, R10->as_VMReg());
reg_def R_R10x(SOC, SOC, Op_RegI, 255, R10->as_VMReg()->next());
reg_def R_R11 (SOC, SOC, Op_RegI,  11, R11->as_VMReg());
reg_def R_R11x(SOC, SOC, Op_RegI, 255, R11->as_VMReg()->next());
reg_def R_R12 (SOC, SOC, Op_RegI,  12, R12->as_VMReg());
reg_def R_R12x(SOC, SOC, Op_RegI, 255, R12->as_VMReg()->next());
reg_def R_R13 (SOC, SOC, Op_RegI,  13, R13->as_VMReg());
reg_def R_R13x(SOC, SOC, Op_RegI, 255, R13->as_VMReg()->next());
reg_def R_R14 (SOC, SOC, Op_RegI,  14, R14->as_VMReg());
reg_def R_R14x(SOC, SOC, Op_RegI, 255, R14->as_VMReg()->next());
reg_def R_R15 (SOC, SOC, Op_RegI,  15, R15->as_VMReg());
reg_def R_R15x(SOC, SOC, Op_RegI, 255, R15->as_VMReg()->next());

reg_def R_R16 (SOC, SOC, Op_RegI,  16, R16->as_VMReg()); // IP0
reg_def R_R16x(SOC, SOC, Op_RegI, 255, R16->as_VMReg()->next());
reg_def R_R17 (SOC, SOC, Op_RegI,  17, R17->as_VMReg()); // IP1
reg_def R_R17x(SOC, SOC, Op_RegI, 255, R17->as_VMReg()->next());
reg_def R_R18 (SOC, SOC, Op_RegI,  18, R18->as_VMReg()); // Platform Register
reg_def R_R18x(SOC, SOC, Op_RegI, 255, R18->as_VMReg()->next());

reg_def R_R19 (SOC, SOE, Op_RegI,  19, R19->as_VMReg());
reg_def R_R19x(SOC, SOE, Op_RegI, 255, R19->as_VMReg()->next());
reg_def R_R20 (SOC, SOE, Op_RegI,  20, R20->as_VMReg());
reg_def R_R20x(SOC, SOE, Op_RegI, 255, R20->as_VMReg()->next());
reg_def R_R21 (SOC, SOE, Op_RegI,  21, R21->as_VMReg());
reg_def R_R21x(SOC, SOE, Op_RegI, 255, R21->as_VMReg()->next());
reg_def R_R22 (SOC, SOE, Op_RegI,  22, R22->as_VMReg());
reg_def R_R22x(SOC, SOE, Op_RegI, 255, R22->as_VMReg()->next());
reg_def R_R23 (SOC, SOE, Op_RegI,  23, R23->as_VMReg());
reg_def R_R23x(SOC, SOE, Op_RegI, 255, R23->as_VMReg()->next());
reg_def R_R24 (SOC, SOE, Op_RegI,  24, R24->as_VMReg());
reg_def R_R24x(SOC, SOE, Op_RegI, 255, R24->as_VMReg()->next());
reg_def R_R25 (SOC, SOE, Op_RegI,  25, R25->as_VMReg());
reg_def R_R25x(SOC, SOE, Op_RegI, 255, R25->as_VMReg()->next());
reg_def R_R26 (SOC, SOE, Op_RegI,  26, R26->as_VMReg());
reg_def R_R26x(SOC, SOE, Op_RegI, 255, R26->as_VMReg()->next());
reg_def R_R27 (SOC, SOE, Op_RegI,  27, R27->as_VMReg());         // Rheap_base
reg_def R_R27x(SOC, SOE, Op_RegI, 255, R27->as_VMReg()->next()); // Rheap_base
reg_def R_R28 ( NS, SOE, Op_RegI,  28, R28->as_VMReg());         // TLS
reg_def R_R28x( NS, SOE, Op_RegI, 255, R28->as_VMReg()->next()); // TLS

reg_def R_R29 ( NS, SOE, Op_RegI,  29, R29->as_VMReg());         // FP
reg_def R_R29x( NS, SOE, Op_RegI, 255, R29->as_VMReg()->next()); // FP
reg_def R_R30 (SOC, SOC, Op_RegI,  30, R30->as_VMReg());         // LR
reg_def R_R30x(SOC, SOC, Op_RegI, 255, R30->as_VMReg()->next()); // LR

reg_def R_ZR ( NS,  NS, Op_RegI,  31, ZR->as_VMReg());  // ZR
reg_def R_ZRx( NS,  NS, Op_RegI, 255, ZR->as_VMReg()->next()); // ZR

// FIXME
//reg_def R_SP ( NS,  NS, Op_RegP,  32, SP->as_VMReg());
reg_def R_SP ( NS,  NS, Op_RegI,  32, SP->as_VMReg());
//reg_def R_SPx( NS, NS, Op_RegP, 255, SP->as_VMReg()->next());
reg_def R_SPx( NS,  NS, Op_RegI, 255, SP->as_VMReg()->next());

// ----------------------------
// Float/Double/Vector Registers
// ----------------------------

reg_def  R_V0(SOC, SOC, Op_RegF,  0,  V0->as_VMReg());
reg_def  R_V1(SOC, SOC, Op_RegF,  1,  V1->as_VMReg());
reg_def  R_V2(SOC, SOC, Op_RegF,  2,  V2->as_VMReg());
reg_def  R_V3(SOC, SOC, Op_RegF,  3,  V3->as_VMReg());
reg_def  R_V4(SOC, SOC, Op_RegF,  4,  V4->as_VMReg());
reg_def  R_V5(SOC, SOC, Op_RegF,  5,  V5->as_VMReg());
reg_def  R_V6(SOC, SOC, Op_RegF,  6,  V6->as_VMReg());
reg_def  R_V7(SOC, SOC, Op_RegF,  7,  V7->as_VMReg());
reg_def  R_V8(SOC, SOC, Op_RegF,  8,  V8->as_VMReg());
reg_def  R_V9(SOC, SOC, Op_RegF,  9,  V9->as_VMReg());
reg_def R_V10(SOC, SOC, Op_RegF, 10, V10->as_VMReg());
reg_def R_V11(SOC, SOC, Op_RegF, 11, V11->as_VMReg());
reg_def R_V12(SOC, SOC, Op_RegF, 12, V12->as_VMReg());
reg_def R_V13(SOC, SOC, Op_RegF, 13, V13->as_VMReg());
reg_def R_V14(SOC, SOC, Op_RegF, 14, V14->as_VMReg());
reg_def R_V15(SOC, SOC, Op_RegF, 15, V15->as_VMReg());
reg_def R_V16(SOC, SOC, Op_RegF, 16, V16->as_VMReg());
reg_def R_V17(SOC, SOC, Op_RegF, 17, V17->as_VMReg());
reg_def R_V18(SOC, SOC, Op_RegF, 18, V18->as_VMReg());
reg_def R_V19(SOC, SOC, Op_RegF, 19, V19->as_VMReg());
reg_def R_V20(SOC, SOC, Op_RegF, 20, V20->as_VMReg());
reg_def R_V21(SOC, SOC, Op_RegF, 21, V21->as_VMReg());
reg_def R_V22(SOC, SOC, Op_RegF, 22, V22->as_VMReg());
reg_def R_V23(SOC, SOC, Op_RegF, 23, V23->as_VMReg());
reg_def R_V24(SOC, SOC, Op_RegF, 24, V24->as_VMReg());
reg_def R_V25(SOC, SOC, Op_RegF, 25, V25->as_VMReg());
reg_def R_V26(SOC, SOC, Op_RegF, 26, V26->as_VMReg());
reg_def R_V27(SOC, SOC, Op_RegF, 27, V27->as_VMReg());
reg_def R_V28(SOC, SOC, Op_RegF, 28, V28->as_VMReg());
reg_def R_V29(SOC, SOC, Op_RegF, 29, V29->as_VMReg());
reg_def R_V30(SOC, SOC, Op_RegF, 30, V30->as_VMReg());
reg_def R_V31(SOC, SOC, Op_RegF, 31, V31->as_VMReg());

reg_def  R_V0b(SOC, SOC, Op_RegF,  0,  V0->as_VMReg()->next(1));
reg_def  R_V1b(SOC, SOC, Op_RegF,  1,  V1->as_VMReg()->next(1));
reg_def  R_V2b(SOC, SOC, Op_RegF,  2,  V2->as_VMReg()->next(1));
reg_def  R_V3b(SOC, SOC, Op_RegF,  3,  V3->as_VMReg()->next(1));
reg_def  R_V4b(SOC, SOC, Op_RegF,  4,  V4->as_VMReg()->next(1));
reg_def  R_V5b(SOC, SOC, Op_RegF,  5,  V5->as_VMReg()->next(1));
reg_def  R_V6b(SOC, SOC, Op_RegF,  6,  V6->as_VMReg()->next(1));
reg_def  R_V7b(SOC, SOC, Op_RegF,  7,  V7->as_VMReg()->next(1));
reg_def  R_V8b(SOC, SOC, Op_RegF,  8,  V8->as_VMReg()->next(1));
reg_def  R_V9b(SOC, SOC, Op_RegF,  9,  V9->as_VMReg()->next(1));
reg_def R_V10b(SOC, SOC, Op_RegF, 10, V10->as_VMReg()->next(1));
reg_def R_V11b(SOC, SOC, Op_RegF, 11, V11->as_VMReg()->next(1));
reg_def R_V12b(SOC, SOC, Op_RegF, 12, V12->as_VMReg()->next(1));
reg_def R_V13b(SOC, SOC, Op_RegF, 13, V13->as_VMReg()->next(1));
reg_def R_V14b(SOC, SOC, Op_RegF, 14, V14->as_VMReg()->next(1));
reg_def R_V15b(SOC, SOC, Op_RegF, 15, V15->as_VMReg()->next(1));
reg_def R_V16b(SOC, SOC, Op_RegF, 16, V16->as_VMReg()->next(1));
reg_def R_V17b(SOC, SOC, Op_RegF, 17, V17->as_VMReg()->next(1));
reg_def R_V18b(SOC, SOC, Op_RegF, 18, V18->as_VMReg()->next(1));
reg_def R_V19b(SOC, SOC, Op_RegF, 19, V19->as_VMReg()->next(1));
reg_def R_V20b(SOC, SOC, Op_RegF, 20, V20->as_VMReg()->next(1));
reg_def R_V21b(SOC, SOC, Op_RegF, 21, V21->as_VMReg()->next(1));
reg_def R_V22b(SOC, SOC, Op_RegF, 22, V22->as_VMReg()->next(1));
reg_def R_V23b(SOC, SOC, Op_RegF, 23, V23->as_VMReg()->next(1));
reg_def R_V24b(SOC, SOC, Op_RegF, 24, V24->as_VMReg()->next(1));
reg_def R_V25b(SOC, SOC, Op_RegF, 25, V25->as_VMReg()->next(1));
reg_def R_V26b(SOC, SOC, Op_RegF, 26, V26->as_VMReg()->next(1));
reg_def R_V27b(SOC, SOC, Op_RegF, 27, V27->as_VMReg()->next(1));
reg_def R_V28b(SOC, SOC, Op_RegF, 28, V28->as_VMReg()->next(1));
reg_def R_V29b(SOC, SOC, Op_RegF, 29, V29->as_VMReg()->next(1));
reg_def R_V30b(SOC, SOC, Op_RegF, 30, V30->as_VMReg()->next(1));
reg_def R_V31b(SOC, SOC, Op_RegF, 31, V31->as_VMReg()->next(1));

reg_def  R_V0c(SOC, SOC, Op_RegF,  0,  V0->as_VMReg()->next(2));
reg_def  R_V1c(SOC, SOC, Op_RegF,  1,  V1->as_VMReg()->next(2));
reg_def  R_V2c(SOC, SOC, Op_RegF,  2,  V2->as_VMReg()->next(2));
reg_def  R_V3c(SOC, SOC, Op_RegF,  3,  V3->as_VMReg()->next(2));
reg_def  R_V4c(SOC, SOC, Op_RegF,  4,  V4->as_VMReg()->next(2));
reg_def  R_V5c(SOC, SOC, Op_RegF,  5,  V5->as_VMReg()->next(2));
reg_def  R_V6c(SOC, SOC, Op_RegF,  6,  V6->as_VMReg()->next(2));
reg_def  R_V7c(SOC, SOC, Op_RegF,  7,  V7->as_VMReg()->next(2));
reg_def  R_V8c(SOC, SOC, Op_RegF,  8,  V8->as_VMReg()->next(2));
reg_def  R_V9c(SOC, SOC, Op_RegF,  9,  V9->as_VMReg()->next(2));
reg_def R_V10c(SOC, SOC, Op_RegF, 10, V10->as_VMReg()->next(2));
reg_def R_V11c(SOC, SOC, Op_RegF, 11, V11->as_VMReg()->next(2));
reg_def R_V12c(SOC, SOC, Op_RegF, 12, V12->as_VMReg()->next(2));
reg_def R_V13c(SOC, SOC, Op_RegF, 13, V13->as_VMReg()->next(2));
reg_def R_V14c(SOC, SOC, Op_RegF, 14, V14->as_VMReg()->next(2));
reg_def R_V15c(SOC, SOC, Op_RegF, 15, V15->as_VMReg()->next(2));
reg_def R_V16c(SOC, SOC, Op_RegF, 16, V16->as_VMReg()->next(2));
reg_def R_V17c(SOC, SOC, Op_RegF, 17, V17->as_VMReg()->next(2));
reg_def R_V18c(SOC, SOC, Op_RegF, 18, V18->as_VMReg()->next(2));
reg_def R_V19c(SOC, SOC, Op_RegF, 19, V19->as_VMReg()->next(2));
reg_def R_V20c(SOC, SOC, Op_RegF, 20, V20->as_VMReg()->next(2));
reg_def R_V21c(SOC, SOC, Op_RegF, 21, V21->as_VMReg()->next(2));
reg_def R_V22c(SOC, SOC, Op_RegF, 22, V22->as_VMReg()->next(2));
reg_def R_V23c(SOC, SOC, Op_RegF, 23, V23->as_VMReg()->next(2));
reg_def R_V24c(SOC, SOC, Op_RegF, 24, V24->as_VMReg()->next(2));
reg_def R_V25c(SOC, SOC, Op_RegF, 25, V25->as_VMReg()->next(2));
reg_def R_V26c(SOC, SOC, Op_RegF, 26, V26->as_VMReg()->next(2));
reg_def R_V27c(SOC, SOC, Op_RegF, 27, V27->as_VMReg()->next(2));
reg_def R_V28c(SOC, SOC, Op_RegF, 28, V28->as_VMReg()->next(2));
reg_def R_V29c(SOC, SOC, Op_RegF, 29, V29->as_VMReg()->next(2));
reg_def R_V30c(SOC, SOC, Op_RegF, 30, V30->as_VMReg()->next(2));
reg_def R_V31c(SOC, SOC, Op_RegF, 31, V31->as_VMReg()->next(2));

reg_def  R_V0d(SOC, SOC, Op_RegF,  0,  V0->as_VMReg()->next(3));
reg_def  R_V1d(SOC, SOC, Op_RegF,  1,  V1->as_VMReg()->next(3));
reg_def  R_V2d(SOC, SOC, Op_RegF,  2,  V2->as_VMReg()->next(3));
reg_def  R_V3d(SOC, SOC, Op_RegF,  3,  V3->as_VMReg()->next(3));
reg_def  R_V4d(SOC, SOC, Op_RegF,  4,  V4->as_VMReg()->next(3));
reg_def  R_V5d(SOC, SOC, Op_RegF,  5,  V5->as_VMReg()->next(3));
reg_def  R_V6d(SOC, SOC, Op_RegF,  6,  V6->as_VMReg()->next(3));
reg_def  R_V7d(SOC, SOC, Op_RegF,  7,  V7->as_VMReg()->next(3));
reg_def  R_V8d(SOC, SOC, Op_RegF,  8,  V8->as_VMReg()->next(3));
reg_def  R_V9d(SOC, SOC, Op_RegF,  9,  V9->as_VMReg()->next(3));
reg_def R_V10d(SOC, SOC, Op_RegF, 10, V10->as_VMReg()->next(3));
reg_def R_V11d(SOC, SOC, Op_RegF, 11, V11->as_VMReg()->next(3));
reg_def R_V12d(SOC, SOC, Op_RegF, 12, V12->as_VMReg()->next(3));
reg_def R_V13d(SOC, SOC, Op_RegF, 13, V13->as_VMReg()->next(3));
reg_def R_V14d(SOC, SOC, Op_RegF, 14, V14->as_VMReg()->next(3));
reg_def R_V15d(SOC, SOC, Op_RegF, 15, V15->as_VMReg()->next(3));
reg_def R_V16d(SOC, SOC, Op_RegF, 16, V16->as_VMReg()->next(3));
reg_def R_V17d(SOC, SOC, Op_RegF, 17, V17->as_VMReg()->next(3));
reg_def R_V18d(SOC, SOC, Op_RegF, 18, V18->as_VMReg()->next(3));
reg_def R_V19d(SOC, SOC, Op_RegF, 19, V19->as_VMReg()->next(3));
reg_def R_V20d(SOC, SOC, Op_RegF, 20, V20->as_VMReg()->next(3));
reg_def R_V21d(SOC, SOC, Op_RegF, 21, V21->as_VMReg()->next(3));
reg_def R_V22d(SOC, SOC, Op_RegF, 22, V22->as_VMReg()->next(3));
reg_def R_V23d(SOC, SOC, Op_RegF, 23, V23->as_VMReg()->next(3));
reg_def R_V24d(SOC, SOC, Op_RegF, 24, V24->as_VMReg()->next(3));
reg_def R_V25d(SOC, SOC, Op_RegF, 25, V25->as_VMReg()->next(3));
reg_def R_V26d(SOC, SOC, Op_RegF, 26, V26->as_VMReg()->next(3));
reg_def R_V27d(SOC, SOC, Op_RegF, 27, V27->as_VMReg()->next(3));
reg_def R_V28d(SOC, SOC, Op_RegF, 28, V28->as_VMReg()->next(3));
reg_def R_V29d(SOC, SOC, Op_RegF, 29, V29->as_VMReg()->next(3));
reg_def R_V30d(SOC, SOC, Op_RegF, 30, V30->as_VMReg()->next(3));
reg_def R_V31d(SOC, SOC, Op_RegF, 31, V31->as_VMReg()->next(3));

// ----------------------------
// Special Registers
// Condition Codes Flag Registers
reg_def APSR (SOC, SOC,  Op_RegFlags, 255, VMRegImpl::Bad());
reg_def FPSCR(SOC, SOC,  Op_RegFlags, 255, VMRegImpl::Bad());

// ----------------------------
// Specify the enum values for the registers.  These enums are only used by the
// OptoReg "class". We can convert these enum values at will to VMReg when needed
// for visibility to the rest of the vm. The order of this enum influences the
// register allocator so having the freedom to set this order and not be stuck
// with the order that is natural for the rest of the vm is worth it.

// Quad vector must be aligned here, so list them first.
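// Each V register is described by four adjacent reg_defs (R_Vn, R_Vnb, R_Vnc,
// R_Vnd), one 32-bit slot per quarter of the 128-bit quad register.  Listing
// the FP/vector registers first keeps every such group of four slots on a
// 4-slot boundary in the allocation order.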
alloc_class fprs(
    R_V8,  R_V8b,  R_V8c,  R_V8d,  R_V9,  R_V9b,  R_V9c,  R_V9d,
    R_V10, R_V10b, R_V10c, R_V10d, R_V11, R_V11b, R_V11c, R_V11d,
    R_V12, R_V12b, R_V12c, R_V12d, R_V13, R_V13b, R_V13c, R_V13d,
    R_V14, R_V14b, R_V14c, R_V14d, R_V15, R_V15b, R_V15c, R_V15d,
    R_V16, R_V16b, R_V16c, R_V16d, R_V17, R_V17b, R_V17c, R_V17d,
    R_V18, R_V18b, R_V18c, R_V18d, R_V19, R_V19b, R_V19c, R_V19d,
    R_V20, R_V20b, R_V20c, R_V20d, R_V21, R_V21b, R_V21c, R_V21d,
    R_V22, R_V22b, R_V22c, R_V22d, R_V23, R_V23b, R_V23c, R_V23d,
    R_V24, R_V24b, R_V24c, R_V24d, R_V25, R_V25b, R_V25c, R_V25d,
    R_V26, R_V26b, R_V26c, R_V26d, R_V27, R_V27b, R_V27c, R_V27d,
    R_V28, R_V28b, R_V28c, R_V28d, R_V29, R_V29b, R_V29c, R_V29d,
    R_V30, R_V30b, R_V30c, R_V30d, R_V31, R_V31b, R_V31c, R_V31d,
    R_V0,  R_V0b,  R_V0c,  R_V0d,  R_V1,  R_V1b,  R_V1c,  R_V1d,
    R_V2,  R_V2b,  R_V2c,  R_V2d,  R_V3,  R_V3b,  R_V3c,  R_V3d,
    R_V4,  R_V4b,  R_V4c,  R_V4d,  R_V5,  R_V5b,  R_V5c,  R_V5d,
    R_V6,  R_V6b,  R_V6c,  R_V6d,  R_V7,  R_V7b,  R_V7c,  R_V7d
);

// Need double-register alignment here.
// We are already quad-register aligned because of vectors above.
alloc_class gprs(
    R_R0,  R_R0x,  R_R1,  R_R1x,  R_R2,  R_R2x,  R_R3,  R_R3x,
    R_R4,  R_R4x,  R_R5,  R_R5x,  R_R6,  R_R6x,  R_R7,  R_R7x,
    R_R8,  R_R8x,  R_R9,  R_R9x,  R_R10, R_R10x, R_R11, R_R11x,
    R_R12, R_R12x, R_R13, R_R13x, R_R14, R_R14x, R_R15, R_R15x,
    R_R16, R_R16x, R_R17, R_R17x, R_R18, R_R18x, R_R19, R_R19x,
    R_R20, R_R20x, R_R21, R_R21x, R_R22, R_R22x, R_R23, R_R23x,
    R_R24, R_R24x, R_R25, R_R25x, R_R26, R_R26x, R_R27, R_R27x,
    R_R28, R_R28x, R_R29, R_R29x, R_R30, R_R30x
);
// Continuing with double-register alignment...
alloc_class chunk2(APSR, FPSCR);
alloc_class chunk3(R_SP, R_SPx);
alloc_class chunk4(R_ZR, R_ZRx);

//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg           ( as defined in frame section )
// 2) reg_class interpreter_method_oop_reg ( as defined in frame section )
// 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// ----------------------------
// Integer Register Classes
// ----------------------------
reg_class int_reg_all(R_R0,  R_R1,  R_R2,  R_R3,  R_R4,  R_R5,  R_R6,  R_R7,
                      R_R8,  R_R9,  R_R10, R_R11, R_R12, R_R13, R_R14, R_R15,
                      R_R16, R_R17, R_R18, R_R19, R_R20, R_R21, R_R22, R_R23,
                      R_R24, R_R25, R_R26, R_R27, R_R28, R_R29, R_R30
);

// Exclusions from i_reg:
// SP (R31)
// Rthread/R28: reserved by HotSpot to the TLS register (invariant within Java)
reg_class int_reg %{
    return _INT_REG_mask;
%}
reg_class ptr_reg %{
    return _PTR_REG_mask;
%}
reg_class vectorx_reg %{
    return _VECTORX_REG_mask;
%}

reg_class R0_regI(R_R0);
reg_class R1_regI(R_R1);
reg_class R2_regI(R_R2);
reg_class R3_regI(R_R3);
//reg_class R12_regI(R_R12);

// ----------------------------
// Pointer Register Classes
// ----------------------------

// Special class for storeP instructions, which can store SP or RPC to TLS.
// It is also used for memory addressing, allowing direct TLS addressing.

reg_class sp_ptr_reg %{
    return _SP_PTR_REG_mask;
%}

reg_class store_reg %{
    return _STR_REG_mask;
%}

reg_class store_ptr_reg %{
    return _STR_PTR_REG_mask;
%}

reg_class spillP_reg %{
    return _SPILLP_REG_mask;
%}

// Other special pointer regs
reg_class R0_regP(R_R0, R_R0x);
reg_class R1_regP(R_R1, R_R1x);
reg_class R2_regP(R_R2, R_R2x);
reg_class Rexception_regP(R_R19, R_R19x);
reg_class Ricklass_regP(R_R8, R_R8x);
reg_class Rmethod_regP(R_R27, R_R27x);

reg_class Rthread_regP(R_R28, R_R28x);
reg_class IP_regP(R_R16, R_R16x);
#define RtempRegP IPRegP
reg_class LR_regP(R_R30, R_R30x);

reg_class SP_regP(R_SP,  R_SPx);
reg_class FP_regP(R_R29, R_R29x);

reg_class ZR_regP(R_ZR, R_ZRx);
reg_class ZR_regI(R_ZR);

// ----------------------------
// Long Register Classes
// ----------------------------
reg_class long_reg %{ return _PTR_REG_mask; %}
// for ldrexd, strexd: first reg of pair must be even
reg_class long_reg_align %{ return LONG_REG_mask(); %}

reg_class R0_regL(R_R0,R_R0x); // arg 1 or return value

// ----------------------------
// Special Class for Condition Code Flags Register
reg_class int_flags(APSR);
reg_class float_flags(FPSCR);


// ----------------------------
// Floating Point Register Classes
// ----------------------------
reg_class sflt_reg_0(
  R_V0,  R_V1,  R_V2,  R_V3,  R_V4,  R_V5,  R_V6,  R_V7,
  R_V8,  R_V9,  R_V10, R_V11, R_V12, R_V13, R_V14, R_V15,
  R_V16, R_V17, R_V18, R_V19, R_V20, R_V21, R_V22, R_V23,
  R_V24, R_V25, R_V26, R_V27, R_V28, R_V29, R_V30, R_V31);

reg_class sflt_reg %{
    return _SFLT_REG_mask;
%}

reg_class dflt_low_reg %{
    return _DFLT_REG_mask;
%}

reg_class actual_dflt_reg %{
    return _DFLT_REG_mask;
%}

reg_class vectorx_reg_0(
  R_V0,  R_V1,  R_V2,  R_V3,  R_V4,  R_V5,  R_V6,  R_V7,
  R_V8,  R_V9,  R_V10, R_V11, R_V12, R_V13, R_V14, R_V15,
  R_V16, R_V17, R_V18, R_V19, R_V20, R_V21, R_V22, R_V23,
  R_V24, R_V25, R_V26, R_V27, R_V28, R_V29, R_V30, /*R_V31,*/
  R_V0b,  R_V1b,  R_V2b,  R_V3b,  R_V4b,  R_V5b,  R_V6b,  R_V7b,
  R_V8b,  R_V9b,  R_V10b, R_V11b, R_V12b, R_V13b, R_V14b, R_V15b,
  R_V16b, R_V17b, R_V18b, R_V19b, R_V20b, R_V21b, R_V22b, R_V23b,
  R_V24b, R_V25b, R_V26b, R_V27b, R_V28b, R_V29b, R_V30b, /*R_V31b,*/
  R_V0c,  R_V1c,  R_V2c,  R_V3c,  R_V4c,  R_V5c,  R_V6c,  R_V7c,
  R_V8c,  R_V9c,  R_V10c, R_V11c, R_V12c, R_V13c, R_V14c, R_V15c,
  R_V16c, R_V17c, R_V18c, R_V19c, R_V20c, R_V21c, R_V22c, R_V23c,
  R_V24c, R_V25c, R_V26c, R_V27c, R_V28c, R_V29c, R_V30c, /*R_V31c,*/
  R_V0d,  R_V1d,  R_V2d,  R_V3d,  R_V4d,  R_V5d,  R_V6d,  R_V7d,
  R_V8d,  R_V9d,  R_V10d, R_V11d, R_V12d, R_V13d, R_V14d, R_V15d,
  R_V16d, R_V17d, R_V18d, R_V19d, R_V20d, R_V21d, R_V22d, R_V23d,
  R_V24d, R_V25d, R_V26d, R_V27d, R_V28d, R_V29d, R_V30d, /*R_V31d*/);

reg_class Rmemcopy_reg %{
    return _RMEMCOPY_REG_mask;
%}

%}

source_hpp %{

const MachRegisterNumbers R_mem_copy_lo_num = R_V31_num;
const MachRegisterNumbers R_mem_copy_hi_num = R_V31b_num;
const FloatRegister Rmemcopy = V31;

const MachRegisterNumbers R_hf_ret_lo_num = R_V0_num;
const MachRegisterNumbers R_hf_ret_hi_num = R_V0b_num;
const FloatRegister Rhfret = V0;

extern OptoReg::Name R_Ricklass_num;
extern OptoReg::Name R_Rmethod_num;
extern OptoReg::Name R_tls_num;
extern OptoReg::Name R_Rheap_base_num;

extern RegMask _INT_REG_mask;
extern RegMask _PTR_REG_mask;
extern RegMask _SFLT_REG_mask;
extern RegMask _DFLT_REG_mask;
extern RegMask _VECTORX_REG_mask;
extern RegMask _RMEMCOPY_REG_mask;
extern RegMask _SP_PTR_REG_mask;
extern RegMask _SPILLP_REG_mask;
extern RegMask _STR_REG_mask;
extern RegMask _STR_PTR_REG_mask;

#define LDR_DOUBLE "LDR_D"
#define LDR_FLOAT  "LDR_S"
#define STR_DOUBLE "STR_D"
#define STR_FLOAT  "STR_S"
#define STR_64     "STR"
#define LDR_64     "LDR"
#define STR_32     "STR_W"
#define LDR_32     "LDR_W"
#define MOV_DOUBLE "FMOV_D"
#define MOV_FLOAT  "FMOV_S"
#define FMSR       "FMOV_SW"
#define FMRS       "FMOV_WS"
#define LDREX      "ldxr  "
#define STREX      "stxr  "

#define str_64     str
#define ldr_64     ldr
#define ldr_32     ldr_w
#define ldrex      ldxr
#define strex      stxr

#define fmsr       fmov_sw
#define fmrs       fmov_ws
#define fconsts    fmov_s
#define fconstd    fmov_d

static inline bool is_uimm12(jlong imm, int shift) {
  return Assembler::is_unsigned_imm_in_range(imm, 12, shift);
}
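
// A minimal worked example, assuming the usual scaled-offset semantics of
// is_unsigned_imm_in_range (non-negative, multiple of 1 << shift, quotient
// fits in 12 bits): is_uimm12(4088, 3) is true since 4088 = 511 * 8, while
// is_uimm12(4092, 3) is false because 4092 is not a multiple of 8.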

static inline bool is_memoryD(int offset) {
  int scale = 3; // LogBytesPerDouble
  return is_uimm12(offset, scale);
}

static inline bool is_memoryfp(int offset) {
  int scale = LogBytesPerInt; // include 32-bit word accesses
  return is_uimm12(offset, scale);
}

static inline bool is_memoryI(int offset) {
  int scale = LogBytesPerInt;
  return is_uimm12(offset, scale);
}

static inline bool is_memoryP(int offset) {
  int scale = LogBytesPerWord;
  return is_uimm12(offset, scale);
}

static inline bool is_memoryHD(int offset) {
  int scale = LogBytesPerInt; // include 32-bit word accesses
  return is_uimm12(offset, scale);
}

uintx limmL_low(uintx imm, int n);

static inline bool Xis_aimm(int imm) {
  return Assembler::ArithmeticImmediate(imm).is_encoded();
}

static inline bool is_aimm(intptr_t imm) {
  return Assembler::ArithmeticImmediate(imm).is_encoded();
}

static inline bool is_limmL(uintptr_t imm) {
  return Assembler::LogicalImmediate(imm).is_encoded();
}
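
// For reference: AArch64 logical (bitmask) immediates are replicated runs of
// consecutive ones, so e.g. 0x00FF00FF00FF00FF is encodable (pattern 0x00FF
// replicated every 16 bits) while an arbitrary value such as 0x1234 is not.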

static inline bool is_limmL_low(intptr_t imm, int n) {
  return is_limmL(limmL_low(imm, n));
}

static inline bool is_limmI(jint imm) {
  return Assembler::LogicalImmediate(imm, true).is_encoded();
}

static inline uintx limmI_low(jint imm, int n) {
  return limmL_low(imm, n);
}

static inline bool is_limmI_low(jint imm, int n) {
  return is_limmL_low(imm, n);
}

%}

source %{

// Given a register encoding, produce an Integer Register object
static Register reg_to_register_object(int register_encoding) {
  assert(R0->encoding() == R_R0_enc && R30->encoding() == R_R30_enc, "right coding");
  assert(Rthread->encoding() == R_R28_enc, "right coding");
  assert(SP->encoding() == R_SP_enc, "right coding");
  return as_Register(register_encoding);
}

// Given a register encoding, produce a single-precision Float Register object
static FloatRegister reg_to_FloatRegister_object(int register_encoding) {
  assert(V0->encoding() == R_V0_enc && V31->encoding() == R_V31_enc, "right coding");
  return as_FloatRegister(register_encoding);
}

RegMask _INT_REG_mask;
RegMask _PTR_REG_mask;
RegMask _SFLT_REG_mask;
RegMask _DFLT_REG_mask;
RegMask _VECTORX_REG_mask;
RegMask _RMEMCOPY_REG_mask;
RegMask _SP_PTR_REG_mask;
RegMask _SPILLP_REG_mask;
RegMask _STR_REG_mask;
RegMask _STR_PTR_REG_mask;

OptoReg::Name R_Ricklass_num = -1;
OptoReg::Name R_Rmethod_num  = -1;
OptoReg::Name R_tls_num      = -1;
OptoReg::Name R_Rtemp_num    = -1;
OptoReg::Name R_Rheap_base_num = -1;

static int mov_oop_size = -1;

#ifdef ASSERT
static bool same_mask(const RegMask &a, const RegMask &b) {
    RegMask a_sub_b = a; a_sub_b.SUBTRACT(b);
    RegMask b_sub_a = b; b_sub_a.SUBTRACT(a);
    return a_sub_b.Size() == 0 && b_sub_a.Size() == 0;
}
#endif

void Compile::pd_compiler2_init() {

    R_Ricklass_num = OptoReg::as_OptoReg(Ricklass->as_VMReg());
    R_Rmethod_num  = OptoReg::as_OptoReg(Rmethod->as_VMReg());
    R_tls_num      = OptoReg::as_OptoReg(Rthread->as_VMReg());
    R_Rtemp_num    = OptoReg::as_OptoReg(Rtemp->as_VMReg());
    R_Rheap_base_num = OptoReg::as_OptoReg(Rheap_base->as_VMReg());

    _INT_REG_mask = _INT_REG_ALL_mask;
    _INT_REG_mask.Remove(R_tls_num);
    _INT_REG_mask.Remove(R_SP_num);
    if (UseCompressedOops) {
      _INT_REG_mask.Remove(R_Rheap_base_num);
    }
    // Remove Rtemp because safepoint poll can trash it
    // (see SharedRuntime::generate_handler_blob)
    _INT_REG_mask.Remove(R_Rtemp_num);

    _PTR_REG_mask = _INT_REG_mask;
    _PTR_REG_mask.smear_to_sets(2);

    // STR_REG    = INT_REG+ZR
    // SPILLP_REG = INT_REG+SP
    // SP_PTR_REG = INT_REG+SP+TLS
    _STR_REG_mask = _INT_REG_mask;
    _SP_PTR_REG_mask = _STR_REG_mask;
    _STR_REG_mask.Insert(R_ZR_num);
    _SP_PTR_REG_mask.Insert(R_SP_num);
    _SPILLP_REG_mask = _SP_PTR_REG_mask;
    _SP_PTR_REG_mask.Insert(R_tls_num);
    _STR_PTR_REG_mask = _STR_REG_mask;
    _STR_PTR_REG_mask.smear_to_sets(2);
    _SP_PTR_REG_mask.smear_to_sets(2);
    _SPILLP_REG_mask.smear_to_sets(2);

    _RMEMCOPY_REG_mask = RegMask(R_mem_copy_lo_num);
    assert(OptoReg::as_OptoReg(Rmemcopy->as_VMReg()) == R_mem_copy_lo_num, "!");

    _SFLT_REG_mask = _SFLT_REG_0_mask;
    _SFLT_REG_mask.SUBTRACT(_RMEMCOPY_REG_mask);
    _DFLT_REG_mask = _SFLT_REG_mask;
    _DFLT_REG_mask.smear_to_sets(2);
    _VECTORX_REG_mask = _SFLT_REG_mask;
    _VECTORX_REG_mask.smear_to_sets(4);
    assert(same_mask(_VECTORX_REG_mask, _VECTORX_REG_0_mask), "!");

#ifdef ASSERT
    RegMask r((RegMask *)&SFLT_REG_mask());
    r.smear_to_sets(2);
    assert(same_mask(r, _DFLT_REG_mask), "!");
#endif

    if (VM_Version::prefer_moves_over_load_literal()) {
      mov_oop_size = 4;
    } else {
      mov_oop_size = 1;
    }

    assert(Matcher::interpreter_method_oop_reg_encode() == Rmethod->encoding(), "should be");
}

uintx limmL_low(uintx imm, int n) {
  // 1: try as is
  if (is_limmL(imm)) {
    return imm;
  }
  // 2: try low bits + all 0's
  uintx imm0 = imm & right_n_bits(n);
  if (is_limmL(imm0)) {
    return imm0;
  }
  // 3: try low bits + all 1's
  uintx imm1 = imm0 | left_n_bits(BitsPerWord - n);
  if (is_limmL(imm1)) {
    return imm1;
  }
#if 0
  // 4: try low bits replicated
  int field = 1 << log2_intptr(n + n - 1);
  assert(field >= n, "!");
  assert(field / n == 1, "!");
  intptr_t immr = imm;
  while (field < BitsPerWord) {
    intptr_t bits = immr & right_n_bits(field);
    immr = bits | (bits << field);
    field = field << 1;
  }
  // replicate at power-of-2 boundary
  if (is_limmL(immr)) {
    return immr;
  }
#endif
  return imm;
}
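
// Worked example: limmL_low(0x1234FFFF, 16) fails step 1 (0x1234FFFF is not a
// bitmask immediate) and then succeeds at step 2 with imm0 = 0xFFFF (a run of
// 16 ones), so a caller can materialize the low half with a logical immediate
// and patch up the remaining bits separately.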

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rbase = (base == 0xff) ? SP : as_Register(base);
  if (index != 0xff) {
    Register rindex = as_Register(index);
    if (disp == 0x7fffffff) { // special value to indicate sign-extend
      Address madr(rbase, rindex, ex_sxtw, scale);
      madr._rspec = rspec;
      return madr;
    } else {
      assert(disp == 0, "unsupported");
      Address madr(rbase, rindex, ex_lsl, scale);
      madr._rspec = rspec;
      return madr;
    }
  } else {
    assert(scale == 0, "not supported");
    Address madr(rbase, disp);
    madr._rspec = rspec;
    return madr;
  }
}

// Location of compiled Java return values.  Same as C
OptoRegPair c2::return_value(int ideal_reg) {
  assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
  static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, R_R0_num,     R_R0_num,  R_hf_ret_lo_num,  R_hf_ret_lo_num, R_R0_num };
  static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, R_R0x_num, OptoReg::Bad,     R_hf_ret_hi_num, R_R0x_num };
  return OptoRegPair( hi[ideal_reg], lo[ideal_reg]);
}

// !!!!! Special hack to get all types of calls to specify the byte offset
//       from the start of the call to the point where the return address
//       will point.

int MachCallStaticJavaNode::ret_addr_offset() {
  bool far = (_method == NULL) ? maybe_far_call(this) : !cache_reachable();
  bool patchable = _method != NULL;
  int call_size = MacroAssembler::call_size(entry_point(), far, patchable);
  return (call_size + (_method_handle_invoke ? 1 : 0)) * NativeInstruction::instruction_size;
}

int MachCallDynamicJavaNode::ret_addr_offset() {
  bool far = !cache_reachable();
  int call_size = MacroAssembler::call_size(entry_point(), far, true);
  return (mov_oop_size + call_size) * NativeInstruction::instruction_size;
}

int MachCallRuntimeNode::ret_addr_offset() {
  int call_size = 0;
  // TODO: check if Leaf nodes also need this
  if (!is_MachCallLeaf()) {
    // adr $temp, ret_addr
    // str $temp, [SP + last_java_pc]
    call_size += 2;
  }
  // bl or mov_slow; blr
  bool far = maybe_far_call(this);
  call_size += MacroAssembler::call_size(entry_point(), far, false);
  return call_size * NativeInstruction::instruction_size;
}

%}

// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp.  This lets us avoid many, many other ifdefs.)
#define immX      immL
#define iRegX     iRegL
#define aimmX     aimmL
#define limmX     limmL
#define immX9     immL9
#define LShiftX   LShiftL
#define shimmX    immU6

#define store_RegLd store_RegL

//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);          // Required cost attribute

//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

//----------Simple Operands----------------------------------------------------
// Immediate Operands

// Integer Immediate: 9-bit (including sign bit), so same as immI8?
// FIXME: simm9 allows -256, but immI8 doesn't...
operand simm9() %{
  predicate(Assembler::is_imm_in_range(n->get_int(), 9, 0));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}


operand uimm12() %{
  predicate(Assembler::is_unsigned_imm_in_range(n->get_int(), 12, 0));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand aimmP() %{
  predicate(n->get_ptr() == 0 || (is_aimm(n->get_ptr()) && ((ConPNode*)n)->type()->reloc() == relocInfo::none));
  match(ConP);

  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 12-bit - for addressing mode
operand immL12() %{
  predicate((-4096 < n->get_long()) && (n->get_long() < 4096));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 9-bit - for addressing mode
operand immL9() %{
  predicate((-256 <= n->get_long()) && (n->get_long() < 256));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immIMov() %{
  predicate(n->get_int() >> 16 == 0);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immLMov() %{
  predicate(n->get_long() >> 16 == 0);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
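
// immIMov/immLMov above accept exactly the values 0..65535, i.e. constants
// that a single MOV (MOVZ with zero shift) can materialize.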

operand immUL12() %{
  predicate(is_uimm12(n->get_long(), 0));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

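// The immUL12xN variants below are the same unsigned 12-bit immediate scaled
// by 2, 4, 8 or 16 bytes, presumably matching the scaled unsigned-offset
// addressing forms of LDR/STR for halfword, word, doubleword and quadword
// accesses.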
operand immUL12x2() %{
  predicate(is_uimm12(n->get_long(), 1));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immUL12x4() %{
  predicate(is_uimm12(n->get_long(), 2));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immUL12x8() %{
  predicate(is_uimm12(n->get_long(), 3));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immUL12x16() %{
  predicate(is_uimm12(n->get_long(), 4));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Used for long shift
operand immU6() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 63));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Used for register extended shift
operand immI_0_4() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Compressed Pointer Register
operand iRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);
  match(ZRRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand SPRegP() %{
  constraint(ALLOC_IN_RC(SP_regP));
  match(RegP);

  format %{ %}
  interface(REG_INTER);
%}

operand ZRRegP() %{
  constraint(ALLOC_IN_RC(ZR_regP));
  match(RegP);

  format %{ %}
  interface(REG_INTER);
%}

operand ZRRegL() %{
  constraint(ALLOC_IN_RC(ZR_regP));
  match(RegL);

  format %{ %}
  interface(REG_INTER);
%}

operand ZRRegI() %{
  constraint(ALLOC_IN_RC(ZR_regI));
  match(RegI);

  format %{ %}
  interface(REG_INTER);
%}

operand ZRRegN() %{
  constraint(ALLOC_IN_RC(ZR_regI));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}