/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
#endif


// Implementation of AbstractAssembler
//
// The AbstractAssembler generates code into a CodeBuffer. To make code
// generation faster, the assembler keeps a copy of the code buffer's
// boundaries and modifies them when emitting bytes, rather than going
// through the code buffer's accessor functions all the time. The code
// buffer is updated via set_code_end(...) after a whole instruction has
// been emitted.
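//
// A rough sketch of the fast path this caching enables (the real emit
// routines live in the platform assembler_<arch>.inline.hpp headers
// included above and vary by port):
//
//   void AbstractAssembler::emit_byte(int x) {
//     *(unsigned char*)_code_pos = (unsigned char)x;  // store via cached pointer
//     _code_pos += sizeof(unsigned char);
//     sync();              // push _code_pos back to the CodeSection's end
//   }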

AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
  if (code == NULL)  return;
  CodeSection* cs = code->insts();
  cs->clear_mark();   // new assembler kills old mark
  _code_section = cs;
  _code_begin   = cs->start();
  _code_limit   = cs->limit();
  _code_pos     = cs->end();
  _oop_recorder = code->oop_recorder();
  DEBUG_ONLY( _short_branch_delta = 0; )
  if (_code_begin == NULL) {
    vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s",
                                     code->name()));
  }
}

void AbstractAssembler::set_code_section(CodeSection* cs) {
  assert(cs->outer() == code_section()->outer(), "sanity");
  assert(cs->is_allocated(), "need to pre-allocate this section");
  cs->clear_mark();  // new assembly into this section kills old mark
  _code_section = cs;
  _code_begin   = cs->start();
  _code_limit   = cs->limit();
  _code_pos     = cs->end();
}

// Inform CodeBuffer that incoming code and relocation will be for stubs
address AbstractAssembler::start_a_stub(int required_space) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->stubs();
  assert(_code_section == cb->insts(), "not in insts?");
  sync();
  if (cs->maybe_expand_to_ensure_remaining(required_space)
      && cb->blob() == NULL) {
    return NULL;
  }
  set_code_section(cs);
  return pc();
}

// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_stub() returned NULL
void AbstractAssembler::end_a_stub() {
  assert(_code_section == code()->stubs(), "not in stubs?");
  sync();
  set_code_section(code()->insts());
}

// Inform CodeBuffer that incoming code and relocation will be for constants
address AbstractAssembler::start_a_const(int required_space, int required_align) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->consts();
  assert(_code_section == cb->insts(), "not in insts?");
  sync();
  address end = cs->end();
  int pad = -(intptr_t)end & (required_align-1);
  if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) {
    if (cb->blob() == NULL)  return NULL;
    end = cs->end();  // refresh pointer
  }
  if (pad > 0) {
    while (--pad >= 0) { *end++ = 0; }
    cs->set_end(end);
  }
  set_code_section(cs);
  return end;
}

// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_const() returned NULL
void AbstractAssembler::end_a_const() {
  assert(_code_section == code()->consts(), "not in consts?");
  sync();
  set_code_section(code()->insts());
}


void AbstractAssembler::flush() {
  sync();
  ICache::invalidate_range(addr_at(0), offset());
}


void AbstractAssembler::a_byte(int x) {
  emit_byte(x);
}


void AbstractAssembler::a_long(jint x) {
  emit_long(x);
}

// Labels refer to positions in the (to be) generated code. There are bound
// and unbound labels.
//
// Bound labels refer to known positions in the already generated code.
// offset() is the position the label refers to.
//
// Unbound labels refer to unknown positions in the code to be generated; an
// unbound label may contain a list of unresolved displacements that refer to it.
#ifndef PRODUCT
void AbstractAssembler::print(Label& L) {
  if (L.is_bound()) {
    tty->print_cr("bound label to %d|%d", L.loc_pos(), L.loc_sect());
  } else if (L.is_unbound()) {
    L.print_instructions((MacroAssembler*)this);
  } else {
    tty->print_cr("label in inconsistent state (loc = %d)", L.loc());
  }
}
#endif // PRODUCT


void AbstractAssembler::bind(Label& L) {
  if (L.is_bound()) {
    // Assembler can bind a label more than once to the same place.
    guarantee(L.loc() == locator(), "attempt to redefine label");
    return;
  }
  L.bind_loc(locator());
  L.patch_instructions((MacroAssembler*)this);
}

void AbstractAssembler::generate_stack_overflow_check(int frame_size_in_bytes) {
  if (UseStackBanging) {
    // Each code entry causes one stack bang n pages down the stack where n
    // is configurable by StackShadowPages. The setting depends on the maximum
    // depth of the VM call stack or native code before going back into Java
    // code, since only Java code can raise a stack overflow exception using
    // the stack banging mechanism. The VM and native code do not detect
    // stack overflow.
    // The code in JavaCalls::call() checks that there is at least n pages
    // available, so all entry code needs to do is bang once for the end of
    // this shadow zone.
    // The entry code may need to bang additional pages if the framesize
    // is greater than a page.

    const int page_size = os::vm_page_size();
    int bang_end = StackShadowPages*page_size;

    // This is how far the previous frame's stack banging extended.
    const int bang_end_safe = bang_end;

    if (frame_size_in_bytes > page_size) {
      bang_end += frame_size_in_bytes;
    }

    int bang_offset = bang_end_safe;
    while (bang_offset <= bang_end) {
      // Need at least one stack bang at end of shadow zone.
      bang_stack_with_offset(bang_offset);
      bang_offset += page_size;
    }
  } // end (UseStackBanging)
}

void Label::add_patch_at(CodeBuffer* cb, int branch_loc) {
  assert(_loc == -1, "Label is unbound");
  if (_patch_index < PatchCacheSize) {
    _patches[_patch_index] = branch_loc;
  } else {
    if (_patch_overflow == NULL) {
      _patch_overflow = cb->create_patch_overflow();
    }
    _patch_overflow->push(branch_loc);
  }
  ++_patch_index;
}

void Label::patch_instructions(MacroAssembler* masm) {
  assert(is_bound(), "Label is bound");
  CodeBuffer* cb = masm->code();
  int target_sect = CodeBuffer::locator_sect(loc());
  address target = cb->locator_address(loc());
  while (_patch_index > 0) {
    --_patch_index;
    int branch_loc;
    if (_patch_index >= PatchCacheSize) {
      branch_loc = _patch_overflow->pop();
    } else {
      branch_loc = _patches[_patch_index];
    }
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      // The thing to patch is a constant word.
      *(address*)branch = target;
      continue;
    }

#ifdef ASSERT
    // Cross-section branches only work if the
    // intermediate section boundaries are frozen.
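    // (If a section between the branch and its target could still grow or
    // shift, the distance being patched in would go stale; freezing the
    // intermediate sections pins the relative offsets.)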
    if (target_sect != branch_sect) {
      for (int n = MIN2(target_sect, branch_sect),
               nlimit = (target_sect + branch_sect) - n;
           n < nlimit; n++) {
        CodeSection* cs = cb->code_section(n);
        assert(cs->is_frozen(), "cross-section branch needs stable offsets");
      }
    }
#endif //ASSERT

    // Push the target offset into the branch instruction.
    masm->pd_patch_instruction(branch, target);
  }
}

struct DelayedConstant {
  typedef void (*value_fn_t)();
  BasicType type;
  intptr_t value;
  value_fn_t value_fn;
  // This limit of 20 is generous for initial uses.
  // The limit needs to be large enough to store the field offsets
  // into classes which do not have statically fixed layouts.
  // (Initial use is for method handle object offsets.)
  // Look for uses of "delayed_value" in the source code
  // and make sure this number is generous enough to handle all of them.
  enum { DC_LIMIT = 20 };
  static DelayedConstant delayed_constants[DC_LIMIT];
  static DelayedConstant* add(BasicType type, value_fn_t value_fn);
  bool match(BasicType t, value_fn_t cfn) {
    return type == t && value_fn == cfn;
  }
  static void update_all();
};

DelayedConstant DelayedConstant::delayed_constants[DC_LIMIT];
// Default C structure initialization rules have the following effect here:
// = { { (BasicType)0, (intptr_t)NULL }, ... };

DelayedConstant* DelayedConstant::add(BasicType type,
                                      DelayedConstant::value_fn_t cfn) {
  for (int i = 0; i < DC_LIMIT; i++) {
    DelayedConstant* dcon = &delayed_constants[i];
    if (dcon->match(type, cfn))
      return dcon;
    if (dcon->value_fn == NULL) {
      // (cmpxchg not because this is multi-threaded but because I'm paranoid)
      if (Atomic::cmpxchg_ptr(CAST_FROM_FN_PTR(void*, cfn), &dcon->value_fn, NULL) == NULL) {
        dcon->type = type;
        return dcon;
      }
    }
  }
  // If this guarantee fires (in pre-integration testing!) then re-evaluate
  // the comment on the definition of DC_LIMIT.
  guarantee(false, "too many delayed constants");
  return NULL;
}

void DelayedConstant::update_all() {
  for (int i = 0; i < DC_LIMIT; i++) {
    DelayedConstant* dcon = &delayed_constants[i];
    if (dcon->value_fn != NULL && dcon->value == 0) {
      typedef int (*int_fn_t)();
      typedef address (*address_fn_t)();
      switch (dcon->type) {
      case T_INT:     dcon->value = (intptr_t) ((int_fn_t)    dcon->value_fn)(); break;
      case T_ADDRESS: dcon->value = (intptr_t) ((address_fn_t)dcon->value_fn)(); break;
      }
    }
  }
}

// Each delayed-value cell starts out zeroed; code emitted via "delayed_value"
// loads through the address returned here, and update_delayed_values() fills
// in the real value once it becomes computable.
intptr_t* AbstractAssembler::delayed_value_addr(int(*value_fn)()) {
  DelayedConstant* dcon = DelayedConstant::add(T_INT, (DelayedConstant::value_fn_t) value_fn);
  return &dcon->value;
}
intptr_t* AbstractAssembler::delayed_value_addr(address(*value_fn)()) {
  DelayedConstant* dcon = DelayedConstant::add(T_ADDRESS, (DelayedConstant::value_fn_t) value_fn);
  return &dcon->value;
}
void AbstractAssembler::update_delayed_values() {
  DelayedConstant::update_all();
}


void AbstractAssembler::block_comment(const char* comment) {
  if (sect() == CodeBuffer::SECT_INSTS) {
    code_section()->outer()->block_comment(offset(), comment);
  }
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // Exception handler checks the nmethod's implicit null checks table
  // only when this method returns false.
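  // An access at a small non-negative offset from a NULL base faults in the
  // protected page at address zero, so the hardware trap can stand in for an
  // explicit check; negative offsets, or offsets at or beyond a page, may
  // not fault and therefore do need an explicit check.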
#ifdef _LP64
  if (UseCompressedOops && Universe::narrow_oop_base() != NULL) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    // The first page after heap_base is unmapped and
    // the 'offset' is equal to [heap_base + offset] for
    // narrow oop implicit null checks.
    uintptr_t base = (uintptr_t)Universe::narrow_oop_base();
    if ((uintptr_t)offset >= base) {
      // Normalize offset for the next check.
      offset = (intptr_t)(pointer_delta((void*)offset, (void*)base, 1));
    }
  }
#endif
  return offset < 0 || os::vm_page_size() <= offset;
}

#ifndef PRODUCT
void Label::print_instructions(MacroAssembler* masm) const {
  CodeBuffer* cb = masm->code();
  for (int i = 0; i < _patch_index; ++i) {
    int branch_loc;
    if (i >= PatchCacheSize) {
      branch_loc = _patch_overflow->at(i - PatchCacheSize);
    } else {
      branch_loc = _patches[i];
    }
    int branch_pos  = CodeBuffer::locator_pos(branch_loc);
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    tty->print_cr("unbound label");
    tty->print("@ %d|%d ", branch_pos, branch_sect);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      tty->print_cr(PTR_FORMAT, *(address*)branch);
      continue;
    }
    masm->pd_print_patched_instruction(branch);
    tty->cr();
  }
}
#endif // ndef PRODUCT