/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"


// Implementation of AbstractAssembler
//
// The AbstractAssembler generates code into a CodeBuffer. To make code generation
// faster, the assembler keeps a copy of the code buffer's boundaries and modifies
// them when emitting bytes, rather than using the code buffer's accessor functions
// all the time. The code buffer is updated via set_code_end(...) after emitting a
// whole instruction.
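//
// Illustrative sketch (not verbatim HotSpot code) of the pattern described
// above: an emitter caches the section's end pointer, writes bytes through the
// cached pointer, and publishes the new boundary once the whole instruction
// has been emitted.
//
//   address end = code_section()->end();   // cached copy of the boundary
//   *end++ = 0x90;                         // emit bytes via the cached pointer
//   code_section()->set_end(end);          // publish after the instruction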

AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
  if (code == NULL)  return;
  CodeSection* cs = code->insts();
  cs->clear_mark();   // new assembler kills old mark
  if (cs->start() == NULL) {
    vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s",
                                     code->name()));
  }
  _code_section = cs;
  _oop_recorder = code->oop_recorder();
  DEBUG_ONLY( _short_branch_delta = 0; )
}

void AbstractAssembler::set_code_section(CodeSection* cs) {
  assert(cs->outer() == code_section()->outer(), "sanity");
  assert(cs->is_allocated(), "need to pre-allocate this section");
  cs->clear_mark();  // new assembly into this section kills old mark
  _code_section = cs;
}

// Inform CodeBuffer that incoming code and relocation will be for stubs
address AbstractAssembler::start_a_stub(int required_space) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->stubs();
  assert(_code_section == cb->insts(), "not in insts?");
  if (cs->maybe_expand_to_ensure_remaining(required_space)
      && cb->blob() == NULL) {
    return NULL;
  }
  set_code_section(cs);
  return pc();
}

// Inform CodeBuffer that incoming code and relocation will be regular code again.
// Should not be called if start_a_stub() returned NULL.
void AbstractAssembler::end_a_stub() {
  assert(_code_section == code()->stubs(), "not in stubs?");
  set_code_section(code()->insts());
}

// Inform CodeBuffer that incoming code and relocation will be for constants
address AbstractAssembler::start_a_const(int required_space, int required_align) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->consts();
  assert(_code_section == cb->insts() || _code_section == cb->stubs(), "not in insts/stubs?");
  address end = cs->end();
  // Bytes needed to round end up to the requested (power-of-two) alignment.
  int pad = -(intptr_t)end & (required_align-1);
  if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) {
    if (cb->blob() == NULL)  return NULL;
    end = cs->end();  // refresh pointer
  }
  if (pad > 0) {
    while (--pad >= 0) { *end++ = 0; }
    cs->set_end(end);
  }
  set_code_section(cs);
  return end;
}

// Inform CodeBuffer that incoming code and relocation will be code
// in section cs (insts or stubs).
void AbstractAssembler::end_a_const(CodeSection* cs) {
  assert(_code_section == code()->consts(), "not in consts?");
  set_code_section(cs);
}

void AbstractAssembler::flush() {
  ICache::invalidate_range(addr_at(0), offset());
}


void AbstractAssembler::a_byte(int x) {
  emit_byte(x);
}


void AbstractAssembler::a_long(jint x) {
  emit_long(x);
}

// Labels refer to positions in the (to be) generated code. There are bound
// and unbound labels.
//
// Bound labels refer to known positions in the already generated code;
// offset() is the position the label refers to.
//
// Unbound labels refer to unknown positions in the code to be generated; an
// unbound label may carry a list of unresolved displacements that refer to it.
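//
// Illustrative usage sketch (assuming an x86-style MacroAssembler with a
// jmp(Label&) instruction; the names here are hypothetical):
//
//   Label L;            // unbound: position not yet known
//   __ jmp(L);          // forward branch, recorded via Label::add_patch_at()
//   ...                 // more code
//   __ bind(L);         // now bound; patch_instructions() fixes the branch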
#ifndef PRODUCT
void AbstractAssembler::print(Label& L) {
  if (L.is_bound()) {
    tty->print_cr("bound label to %d|%d", L.loc_pos(), L.loc_sect());
  } else if (L.is_unbound()) {
    L.print_instructions((MacroAssembler*)this);
  } else {
    tty->print_cr("label in inconsistent state (loc = %d)", L.loc());
  }
}
#endif // PRODUCT


void AbstractAssembler::bind(Label& L) {
  if (L.is_bound()) {
    // Assembler can bind a label more than once to the same place.
    guarantee(L.loc() == locator(), "attempt to redefine label");
    return;
  }
  L.bind_loc(locator());
  L.patch_instructions((MacroAssembler*)this);
}

void AbstractAssembler::generate_stack_overflow_check(int frame_size_in_bytes) {
  if (UseStackBanging) {
    // Each code entry causes one stack bang n pages down the stack, where n
    // is configurable by StackShadowPages. The setting depends on the maximum
    // depth of the VM call stack or native code before going back into Java
    // code, since only Java code can raise a stack overflow exception using
    // the stack banging mechanism. The VM and native code do not detect
    // stack overflow.
    // The code in JavaCalls::call() checks that there are at least n pages
    // available, so all the entry code needs to do is bang once for the end
    // of this shadow zone.
    // The entry code may need to bang additional pages if the frame size
    // is greater than a page.

    const int page_size = os::vm_page_size();
    int bang_end = StackShadowPages*page_size;

    // This is how far the previous frame's stack banging extended.
    const int bang_end_safe = bang_end;

    if (frame_size_in_bytes > page_size) {
      bang_end += frame_size_in_bytes;
    }

    int bang_offset = bang_end_safe;
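    // Worked example (assumed values, for illustration only): with 4K pages
    // and StackShadowPages = 20, bang_end_safe is 80K. A 12K frame extends
    // bang_end to 92K, so the loop below bangs at offsets 80K, 84K, 88K,
    // and 92K.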
    while (bang_offset <= bang_end) {
      // Need at least one stack bang at end of shadow zone.
      bang_stack_with_offset(bang_offset);
      bang_offset += page_size;
    }
  } // end (UseStackBanging)
}

void Label::add_patch_at(CodeBuffer* cb, int branch_loc) {
  assert(_loc == -1, "Label is unbound");
  if (_patch_index < PatchCacheSize) {
    _patches[_patch_index] = branch_loc;
  } else {
    if (_patch_overflow == NULL) {
      _patch_overflow = cb->create_patch_overflow();
    }
    _patch_overflow->push(branch_loc);
  }
  ++_patch_index;
}

void Label::patch_instructions(MacroAssembler* masm) {
  assert(is_bound(), "Label is bound");
  CodeBuffer* cb = masm->code();
  int target_sect = CodeBuffer::locator_sect(loc());
  address target = cb->locator_address(loc());
  while (_patch_index > 0) {
    --_patch_index;
    int branch_loc;
    if (_patch_index >= PatchCacheSize) {
      branch_loc = _patch_overflow->pop();
    } else {
      branch_loc = _patches[_patch_index];
    }
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      // The thing to patch is a constant word.
      *(address*)branch = target;
      continue;
    }

#ifdef ASSERT
    // Cross-section branches only work if the
    // intermediate section boundaries are frozen.
    if (target_sect != branch_sect) {
      for (int n = MIN2(target_sect, branch_sect),
               nlimit = (target_sect + branch_sect) - n;
           n < nlimit; n++) {
        CodeSection* cs = cb->code_section(n);
        assert(cs->is_frozen(), "cross-section branch needs stable offsets");
      }
    }
#endif //ASSERT

    // Push the target offset into the branch instruction.
    masm->pd_patch_instruction(branch, target);
  }
}

struct DelayedConstant {
  typedef void (*value_fn_t)();
  BasicType type;
  intptr_t value;
  value_fn_t value_fn;
  // This limit of 20 is generous for initial uses.
  // The limit needs to be large enough to store the field offsets
  // into classes which do not have statically fixed layouts.
  // (Initial use is for method handle object offsets.)
  // Look for uses of "delayed_value" in the source code
  // and make sure this number is generous enough to handle all of them.
  enum { DC_LIMIT = 20 };
  static DelayedConstant delayed_constants[DC_LIMIT];
  static DelayedConstant* add(BasicType type, value_fn_t value_fn);
  bool match(BasicType t, value_fn_t cfn) {
    return type == t && value_fn == cfn;
  }
  static void update_all();
};

DelayedConstant DelayedConstant::delayed_constants[DC_LIMIT];
// Default C structure initialization rules have the following effect here:
// = { { (BasicType)0, (intptr_t)NULL }, ... };

DelayedConstant* DelayedConstant::add(BasicType type,
                                      DelayedConstant::value_fn_t cfn) {
  for (int i = 0; i < DC_LIMIT; i++) {
    DelayedConstant* dcon = &delayed_constants[i];
    if (dcon->match(type, cfn))
      return dcon;
    if (dcon->value_fn == NULL) {
      // (cmpxchg not because this is multi-threaded but because I'm paranoid)
      if (Atomic::cmpxchg_ptr(CAST_FROM_FN_PTR(void*, cfn), &dcon->value_fn, NULL) == NULL) {
        dcon->type = type;
        return dcon;
      }
    }
  }
  // If this guarantee is hit (in pre-integration testing!) then re-evaluate
  // the comment on the definition of DC_LIMIT.
  guarantee(false, "too many delayed constants");
  return NULL;
}

void DelayedConstant::update_all() {
  for (int i = 0; i < DC_LIMIT; i++) {
    DelayedConstant* dcon = &delayed_constants[i];
    if (dcon->value_fn != NULL && dcon->value == 0) {
      typedef int     (*int_fn_t)();
      typedef address (*address_fn_t)();
      switch (dcon->type) {
      case T_INT:     dcon->value = (intptr_t) ((int_fn_t)    dcon->value_fn)(); break;
      case T_ADDRESS: dcon->value = (intptr_t) ((address_fn_t)dcon->value_fn)(); break;
      }
    }
  }
}

RegisterOrConstant AbstractAssembler::delayed_value(int(*value_fn)(), Register tmp, int offset) {
  intptr_t val = (intptr_t) (*value_fn)();
  if (val != 0)  return val + offset;
  return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset);
}
RegisterOrConstant AbstractAssembler::delayed_value(address(*value_fn)(), Register tmp, int offset) {
  intptr_t val = (intptr_t) (*value_fn)();
  if (val != 0)  return val + offset;
  return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset);
}
intptr_t* AbstractAssembler::delayed_value_addr(int(*value_fn)()) {
  DelayedConstant* dcon = DelayedConstant::add(T_INT, (DelayedConstant::value_fn_t) value_fn);
  return &dcon->value;
}
intptr_t* AbstractAssembler::delayed_value_addr(address(*value_fn)()) {
  DelayedConstant* dcon = DelayedConstant::add(T_ADDRESS, (DelayedConstant::value_fn_t) value_fn);
  return &dcon->value;
}
void AbstractAssembler::update_delayed_values() {
  DelayedConstant::update_all();
}
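
// Illustrative use of the delayed-value machinery above (sketch only;
// my_offset_fn and tmp_reg are hypothetical names, not HotSpot identifiers):
//
//   static int my_offset_fn();                  // returns 0 until computable
//   RegisterOrConstant v = __ delayed_value(my_offset_fn, tmp_reg);
//   ...
//   AbstractAssembler::update_delayed_values(); // fills in recorded slots
//                                               // once my_offset_fn() != 0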


void AbstractAssembler::block_comment(const char* comment) {
  if (sect() == CodeBuffer::SECT_INSTS) {
    code_section()->outer()->block_comment(offset(), comment);
  }
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // Exception handler checks the nmethod's implicit null checks table
  // only when this method returns false.
#ifdef _LP64
  if (UseCompressedOops && Universe::narrow_oop_base() != NULL) {
    assert (Universe::heap() != NULL, "java heap should be initialized");
    // The first page after heap_base is unmapped, and the fault address
    // of a narrow oop implicit null check equals heap_base + offset.
    uintptr_t base = (uintptr_t)Universe::narrow_oop_base();
    if ((uintptr_t)offset >= base) {
      // Normalize offset for the next check.
      offset = (intptr_t)(pointer_delta((void*)offset, (void*)base, 1));
    }
  }
#endif
  return offset < 0 || os::vm_page_size() <= offset;
}

#ifndef PRODUCT
void Label::print_instructions(MacroAssembler* masm) const {
  CodeBuffer* cb = masm->code();
  for (int i = 0; i < _patch_index; ++i) {
    int branch_loc;
    if (i >= PatchCacheSize) {
      branch_loc = _patch_overflow->at(i - PatchCacheSize);
    } else {
      branch_loc = _patches[i];
    }
    int branch_pos  = CodeBuffer::locator_pos(branch_loc);
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    tty->print_cr("unbound label");
    tty->print("@ %d|%d ", branch_pos, branch_sect);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      tty->print_cr(PTR_FORMAT, *(address*)branch);
      continue;
    }
    masm->pd_print_patched_instruction(branch);
    tty->cr();
  }
}
#endif // ndef PRODUCT