1 /* 2 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, Red Hat Inc. All rights reserved. 4 * Copyright (c) 2015-2018, Azul Systems, Inc. All rights reserved. 5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 6 * 7 * This code is free software; you can redistribute it and/or modify it 8 * under the terms of the GNU General Public License version 2 only, as 9 * published by the Free Software Foundation. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
 *
 */

#include "precompiled.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch32.inline.hpp"

// Plain (untyped) LIR operand singletons for the AArch32 core registers
// r0-r15.  All of the statics below are populated exactly once by
// FrameMap::initialize().
LIR_Opr FrameMap::r0_opr;
LIR_Opr FrameMap::r1_opr;
LIR_Opr FrameMap::r2_opr;
LIR_Opr FrameMap::r3_opr;
LIR_Opr FrameMap::r4_opr;
LIR_Opr FrameMap::r5_opr;
LIR_Opr FrameMap::r6_opr;
LIR_Opr FrameMap::r7_opr;
LIR_Opr FrameMap::r8_opr;
LIR_Opr FrameMap::r9_opr;
LIR_Opr FrameMap::r10_opr;
LIR_Opr FrameMap::r11_opr;
LIR_Opr FrameMap::r12_opr;
LIR_Opr FrameMap::r13_opr;
LIR_Opr FrameMap::r14_opr;
LIR_Opr FrameMap::r15_opr;

// Oop-typed views of the same registers, for values that are heap
// references.
LIR_Opr FrameMap::r0_oop_opr;
LIR_Opr FrameMap::r1_oop_opr;
LIR_Opr FrameMap::r2_oop_opr;
LIR_Opr FrameMap::r3_oop_opr;
LIR_Opr FrameMap::r4_oop_opr;
LIR_Opr FrameMap::r5_oop_opr;
LIR_Opr FrameMap::r6_oop_opr;
LIR_Opr FrameMap::r7_oop_opr;
LIR_Opr FrameMap::r8_oop_opr;
LIR_Opr FrameMap::r9_oop_opr;
LIR_Opr FrameMap::r10_oop_opr;
LIR_Opr FrameMap::r11_oop_opr;
LIR_Opr FrameMap::r12_oop_opr;
LIR_Opr FrameMap::r13_oop_opr;
LIR_Opr FrameMap::r14_oop_opr;
LIR_Opr FrameMap::r15_oop_opr;

// Metadata-typed views; only r0-r5 are provided here.
LIR_Opr FrameMap::r0_metadata_opr;
LIR_Opr FrameMap::r1_metadata_opr;
LIR_Opr FrameMap::r2_metadata_opr;
LIR_Opr FrameMap::r3_metadata_opr;
LIR_Opr FrameMap::r4_metadata_opr;
LIR_Opr FrameMap::r5_metadata_opr;

LIR_Opr FrameMap::sp_opr;        // stack pointer as a pointer-sized operand
LIR_Opr FrameMap::receiver_opr;  // incoming Java receiver, per the calling convention

LIR_Opr FrameMap::rscratch1_opr;
LIR_Opr FrameMap::rscratch2_opr;
LIR_Opr FrameMap::rscratch_long_opr;  // rscratch1/rscratch2 paired as one long

// Fixed register pairs used for 64-bit long values, plus the first
// single/double FPU operands.
LIR_Opr FrameMap::long0_opr;
LIR_Opr FrameMap::long1_opr;
LIR_Opr FrameMap::long2_opr;
LIR_Opr FrameMap::fpu0_float_opr;
LIR_Opr FrameMap::fpu0_double_opr;

// Caller-saved register tables; filled in by initialize().
LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, };
LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, };

// One-time construction of the register <-> LIR-operand mapping.  The order
// of the map_register() calls determines each register's LIR index, so the
// sequence below is significant: allocatable registers (r0-r8) must come
// first, unallocatable ones after (see the comment in the middle).
void FrameMap::initialize() {
  assert(!_init_done, "must be called once");

  int i = 0;

  // Allocatable registers: LIR indices 0..8.
  map_register(i, r0); r0_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r1); r1_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r2); r2_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r3); r3_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r4); r4_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r5); r5_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r6); r6_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r7); r7_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r8); r8_opr = LIR_OprFact::single_cpu(i); i++;
  // Mapping lines in this block may be arbitrarily mixed, but all allocatable
  // registers should go above this comment, and unallocatable registers -
  // below.
  map_register(i, r9);  r9_opr  = LIR_OprFact::single_cpu(i); i++; // rscratch1
  map_register(i, r10); r10_opr = LIR_OprFact::single_cpu(i); i++; // rthread
  map_register(i, r11); r11_opr = LIR_OprFact::single_cpu(i); i++; // rfp
  map_register(i, r12); r12_opr = LIR_OprFact::single_cpu(i); i++; // rscratch2
  map_register(i, r13); r13_opr = LIR_OprFact::single_cpu(i); i++; // sp
  map_register(i, r14); r14_opr = LIR_OprFact::single_cpu(i); i++; // lr
  map_register(i, r15); r15_opr = LIR_OprFact::single_cpu(i); i++; // r15_pc

  // This flag must be set after all integer registers are mapped but before
  // the first use of as_*_opr() methods.
  _init_done = true;

  // Typed (oop) views of the registers mapped above.  These use
  // as_oop_opr(), which is why _init_done had to be set first.
  r0_oop_opr  = as_oop_opr(r0);
  r1_oop_opr  = as_oop_opr(r1);
  r2_oop_opr  = as_oop_opr(r2);
  r3_oop_opr  = as_oop_opr(r3);
  r4_oop_opr  = as_oop_opr(r4);
  r5_oop_opr  = as_oop_opr(r5);
  r6_oop_opr  = as_oop_opr(r6);
  r7_oop_opr  = as_oop_opr(r7);
  r8_oop_opr  = as_oop_opr(r8);
  r9_oop_opr  = as_oop_opr(r9);
  r10_oop_opr = as_oop_opr(r10);
  r11_oop_opr = as_oop_opr(r11);
  r12_oop_opr = as_oop_opr(r12);
  r13_oop_opr = as_oop_opr(r13);
  r14_oop_opr = as_oop_opr(r14);
  r15_oop_opr = as_oop_opr(r15);

  r0_metadata_opr = as_metadata_opr(r0);
  r1_metadata_opr = as_metadata_opr(r1);
  r2_metadata_opr = as_metadata_opr(r2);
  r3_metadata_opr = as_metadata_opr(r3);
  r4_metadata_opr = as_metadata_opr(r4);
  r5_metadata_opr = as_metadata_opr(r5);

  sp_opr = as_pointer_opr(sp);

  // Ask the shared runtime where a single T_OBJECT argument (the receiver)
  // lives under the Java calling convention, rather than hard-coding a
  // register here.
  VMRegPair regs;
  BasicType sig_bt = T_OBJECT;
  SharedRuntime::java_calling_convention(&sig_bt, &regs, 1, true);
  receiver_opr = as_oop_opr(regs.first()->as_Register());

  rscratch1_opr = as_opr(rscratch1);
  rscratch2_opr = as_opr(rscratch2);
  rscratch_long_opr = as_long_opr(rscratch1, rscratch2);

  // Fixed register pairs for long values: (r0,r1), (r2,r3), (r4,r5).
  long0_opr = as_long_opr(r0, r1);
  long1_opr = as_long_opr(r2, r3);
  long2_opr = as_long_opr(r4, r5);
  fpu0_float_opr  = LIR_OprFact::single_fpu(0);
  fpu0_double_opr = LIR_OprFact::double_fpu(0, 1);

  // Caller-saved CPU registers are exactly the allocatable set r0-r8
  // mapped above.
  _caller_save_cpu_regs[0] = r0_opr;
  _caller_save_cpu_regs[1] = r1_opr;
  _caller_save_cpu_regs[2] = r2_opr;
  _caller_save_cpu_regs[3] = r3_opr;
  _caller_save_cpu_regs[4] = r4_opr;
  _caller_save_cpu_regs[5] = r5_opr;
  _caller_save_cpu_regs[6] = r6_opr;
  _caller_save_cpu_regs[7] = r7_opr;
  _caller_save_cpu_regs[8] = r8_opr;

  // All FPU registers in [0, nof_caller_save_fpu_regs) are caller-saved.
  for (i = 0; i < nof_caller_save_fpu_regs; i++) {
    _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
  }
}

// The LIR operand that represents the machine stack pointer.
LIR_Opr FrameMap::stack_pointer() {
  return sp_opr;
}

// TODO: Make sure that neither method handle intrinsics nor compiled lambda
// forms modify sp register (i.e., vmIntrinsics::{_invokeBasic, _linkToVirtual,
// _linkToStatic, _linkToSpecial, _linkToInterface, _compiledLambdaForm})
// Returning illegalOpr means no SP save/restore is performed around method
// handle invokes on this platform (hence the TODO above).
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
  return LIR_OprFact::illegalOpr;
}

// Return LIR_Opr corresponding to the given VMRegPair and data type.
// Handles three placement cases: stack slot, core register (possibly a
// pair for 64-bit values), and FPU register.  The trailing unnamed bool
// parameter is accepted for interface compatibility and ignored.
LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
  LIR_Opr opr = LIR_OprFact::illegalOpr;
  VMReg r_1 = reg->first();
  VMReg r_2 = reg->second();
  if (r_1->is_stack()) {
    // Convert stack slot to sp-based address. The calling convention does not
    // count the SharedRuntime::out_preserve_stack_slots() value, so we must
    // add it in here.
    int st_off =
        (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) *
        VMRegImpl::stack_slot_size;
    opr = LIR_OprFact::address(new LIR_Address(sp_opr, st_off, type));
  } else if (r_1->is_Register()) {
    Register reg1 = r_1->as_Register();
#ifdef HARD_FLOAT_CC
    // Under the hard-float calling convention, floating-point values are
    // never passed in core registers, so reaching here with T_FLOAT or
    // T_DOUBLE is a bug.  (Without HARD_FLOAT_CC, T_DOUBLE falls through to
    // the register-pair case below.)
    if (type == T_DOUBLE || type == T_FLOAT) {
      ShouldNotReachHere();
    } else
#endif
    if (type == T_LONG || type == T_DOUBLE) {
      // 64-bit value in a pair of core registers.
      assert(r_2->is_Register(), "wrong VMReg");
      Register reg2 = r_2->as_Register();
      opr = as_long_opr(reg1, reg2);
    } else if (type == T_OBJECT || type == T_ARRAY) {
      opr = as_oop_opr(reg1);
    } else if (type == T_METADATA) {
      opr = as_metadata_opr(reg1);
    } else {
      opr = as_opr(reg1);
    }
  } else if (r_1->is_FloatRegister()) {
    int num = r_1->as_FloatRegister()->encoding();
    if (type == T_FLOAT) {
      opr = LIR_OprFact::single_fpu(num);
    } else {
      // A double occupies an even/odd adjacent FPU register pair.
      assert(is_even(num) && r_2->as_FloatRegister()->encoding() == (num + 1),
             "wrong VMReg");
      opr = LIR_OprFact::double_fpu(num, num + 1);
    }
  } else {
    ShouldNotReachHere();
  }
  return opr;
}

// Return VMReg corresponding to the given FPU register number as it is
// encoded in LIR_Opr. The conversion is straightforward because in this
// implementation the encoding of FPU registers in LIR_Opr's is the same as
// in FloatRegister's.
VMReg FrameMap::fpu_regname(int n) {
  return as_FloatRegister(n)->as_VMReg();
}

// Check that the frame is properly addressable on the platform. The sp-based
// address of every frame slot must have the offset expressible as AArch32's
// imm12 with the separately stored sign.
bool FrameMap::validate_frame() {
  // Start from the frame size itself, then widen by any incoming stack
  // argument that lies beyond it.
  int max_offset = in_bytes(framesize_in_bytes());
  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_stack()) {
      max_offset = MAX2(_argument_locations->at(java_index), max_offset);
    }
    // java_index advances by the Java-level slot count of the argument
    // (2 for long/double, 1 otherwise, per type2size).
    java_index += type2size[opr->type()];
  }
  return Assembler::is_valid_for_offset_imm(max_offset, 12);
}

// Build an sp-relative Address for the given byte offset within the frame.
Address FrameMap::make_new_address(ByteSize sp_offset) const {
  return Address(sp, in_bytes(sp_offset));
}