#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)assembler.cpp 1.41 07/05/05 17:05:03 JVM"
#endif
/*
 * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_assembler.cpp.incl"


// Implementation of AbstractAssembler
//
// The AbstractAssembler generates code into a CodeBuffer. To make code
// generation faster, the assembler keeps a copy of the code buffer's
// boundaries and modifies them when emitting bytes rather than using the
// code buffer's accessor functions all the time. The code buffer is
// updated via set_code_end(...) after emitting a whole instruction.
38 39 AbstractAssembler::AbstractAssembler(CodeBuffer* code) { 40 if (code == NULL) return; 41 CodeSection* cs = code->insts(); 42 cs->clear_mark(); // new assembler kills old mark 43 _code_section = cs; 44 _code_begin = cs->start(); 45 _code_limit = cs->limit(); 46 _code_pos = cs->end(); 47 _oop_recorder= code->oop_recorder(); 48 if (_code_begin == NULL) { 49 vm_exit_out_of_memory1(0, "CodeCache: no room for %s", code->name()); 50 } 51 } 52 53 void AbstractAssembler::set_code_section(CodeSection* cs) { 54 assert(cs->outer() == code_section()->outer(), "sanity"); 55 assert(cs->is_allocated(), "need to pre-allocate this section"); 56 cs->clear_mark(); // new assembly into this section kills old mark 57 _code_section = cs; 58 _code_begin = cs->start(); 59 _code_limit = cs->limit(); 60 _code_pos = cs->end(); 61 } 62 63 // Inform CodeBuffer that incoming code and relocation will be for stubs 64 address AbstractAssembler::start_a_stub(int required_space) { 65 CodeBuffer* cb = code(); 66 CodeSection* cs = cb->stubs(); 67 assert(_code_section == cb->insts(), "not in insts?"); 68 sync(); 69 if (cs->maybe_expand_to_ensure_remaining(required_space) 70 && cb->blob() == NULL) { 71 return NULL; 72 } 73 set_code_section(cs); 74 return pc(); 75 } 76 77 // Inform CodeBuffer that incoming code and relocation will be code 78 // Should not be called if start_a_stub() returned NULL 79 void AbstractAssembler::end_a_stub() { 80 assert(_code_section == code()->stubs(), "not in stubs?"); 81 sync(); 82 set_code_section(code()->insts()); 83 } 84 85 // Inform CodeBuffer that incoming code and relocation will be for stubs 86 address AbstractAssembler::start_a_const(int required_space, int required_align) { 87 CodeBuffer* cb = code(); 88 CodeSection* cs = cb->consts(); 89 assert(_code_section == cb->insts(), "not in insts?"); 90 sync(); 91 address end = cs->end(); 92 int pad = -(intptr_t)end & (required_align-1); 93 if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) { 94 if 
(cb->blob() == NULL) return NULL; 95 end = cs->end(); // refresh pointer 96 } 97 if (pad > 0) { 98 while (--pad >= 0) { *end++ = 0; } 99 cs->set_end(end); 100 } 101 set_code_section(cs); 102 return end; 103 } 104 105 // Inform CodeBuffer that incoming code and relocation will be code 106 // Should not be called if start_a_const() returned NULL 107 void AbstractAssembler::end_a_const() { 108 assert(_code_section == code()->consts(), "not in consts?"); 109 sync(); 110 set_code_section(code()->insts()); 111 } 112 113 114 void AbstractAssembler::flush() { 115 sync(); 116 ICache::invalidate_range(addr_at(0), offset()); 117 } 118 119 120 void AbstractAssembler::a_byte(int x) { 121 emit_byte(x); 122 } 123 124 125 void AbstractAssembler::a_long(jint x) { 126 emit_long(x); 127 } 128 129 // Labels refer to positions in the (to be) generated code. There are bound 130 // and unbound 131 // 132 // Bound labels refer to known positions in the already generated code. 133 // offset() is the position the label refers to. 134 // 135 // Unbound labels refer to unknown positions in the code to be generated; it 136 // may contain a list of unresolved displacements that refer to it 137 #ifndef PRODUCT 138 void AbstractAssembler::print(Label& L) { 139 if (L.is_bound()) { 140 tty->print_cr("bound label to %d|%d", L.loc_pos(), L.loc_sect()); 141 } else if (L.is_unbound()) { 142 L.print_instructions((MacroAssembler*)this); 143 } else { 144 tty->print_cr("label in inconsistent state (loc = %d)", L.loc()); 145 } 146 } 147 #endif // PRODUCT 148 149 150 void AbstractAssembler::bind(Label& L) { 151 if (L.is_bound()) { 152 // Assembler can bind a label more than once to the same place. 
153 guarantee(L.loc() == locator(), "attempt to redefine label"); 154 return; 155 } 156 L.bind_loc(locator()); 157 L.patch_instructions((MacroAssembler*)this); 158 } 159 160 void AbstractAssembler::generate_stack_overflow_check( int frame_size_in_bytes) { 161 if (UseStackBanging) { 162 // Each code entry causes one stack bang n pages down the stack where n 163 // is configurable by StackBangPages. The setting depends on the maximum 164 // depth of VM call stack or native before going back into java code, 165 // since only java code can raise a stack overflow exception using the 166 // stack banging mechanism. The VM and native code does not detect stack 167 // overflow. 168 // The code in JavaCalls::call() checks that there is at least n pages 169 // available, so all entry code needs to do is bang once for the end of 170 // this shadow zone. 171 // The entry code may need to bang additional pages if the framesize 172 // is greater than a page. 173 174 const int page_size = os::vm_page_size(); 175 int bang_end = StackShadowPages*page_size; 176 177 // This is how far the previous frame's stack banging extended. 178 const int bang_end_safe = bang_end; 179 180 if (frame_size_in_bytes > page_size) { 181 bang_end += frame_size_in_bytes; 182 } 183 184 int bang_offset = bang_end_safe; 185 while (bang_offset <= bang_end) { 186 // Need at least one stack bang at end of shadow zone. 
187 bang_stack_with_offset(bang_offset); 188 bang_offset += page_size; 189 } 190 } // end (UseStackBanging) 191 } 192 193 void Label::add_patch_at(CodeBuffer* cb, int branch_loc) { 194 assert(_loc == -1, "Label is unbound"); 195 if (_patch_index < PatchCacheSize) { 196 _patches[_patch_index] = branch_loc; 197 } else { 198 if (_patch_overflow == NULL) { 199 _patch_overflow = cb->create_patch_overflow(); 200 } 201 _patch_overflow->push(branch_loc); 202 } 203 ++_patch_index; 204 } 205 206 void Label::patch_instructions(MacroAssembler* masm) { 207 assert(is_bound(), "Label is bound"); 208 CodeBuffer* cb = masm->code(); 209 int target_sect = CodeBuffer::locator_sect(loc()); 210 address target = cb->locator_address(loc()); 211 while (_patch_index > 0) { 212 --_patch_index; 213 int branch_loc; 214 if (_patch_index >= PatchCacheSize) { 215 branch_loc = _patch_overflow->pop(); 216 } else { 217 branch_loc = _patches[_patch_index]; 218 } 219 int branch_sect = CodeBuffer::locator_sect(branch_loc); 220 address branch = cb->locator_address(branch_loc); 221 if (branch_sect == CodeBuffer::SECT_CONSTS) { 222 // The thing to patch is a constant word. 223 *(address*)branch = target; 224 continue; 225 } 226 227 #ifdef ASSERT 228 // Cross-section branches only work if the 229 // intermediate section boundaries are frozen. 230 if (target_sect != branch_sect) { 231 for (int n = MIN2(target_sect, branch_sect), 232 nlimit = (target_sect + branch_sect) - n; 233 n < nlimit; n++) { 234 CodeSection* cs = cb->code_section(n); 235 assert(cs->is_frozen(), "cross-section branch needs stable offsets"); 236 } 237 } 238 #endif //ASSERT 239 240 // Push the target offset into the branch instruction. 
241 masm->pd_patch_instruction(branch, target); 242 } 243 } 244 245 246 void AbstractAssembler::block_comment(const char* comment) { 247 if (sect() == CodeBuffer::SECT_INSTS) { 248 code_section()->outer()->block_comment(offset(), comment); 249 } 250 } 251 252 253 #ifndef PRODUCT 254 void Label::print_instructions(MacroAssembler* masm) const { 255 CodeBuffer* cb = masm->code(); 256 for (int i = 0; i < _patch_index; ++i) { 257 int branch_loc; 258 if (i >= PatchCacheSize) { 259 branch_loc = _patch_overflow->at(i - PatchCacheSize); 260 } else { 261 branch_loc = _patches[i]; 262 } 263 int branch_pos = CodeBuffer::locator_pos(branch_loc); 264 int branch_sect = CodeBuffer::locator_sect(branch_loc); 265 address branch = cb->locator_address(branch_loc); 266 tty->print_cr("unbound label"); 267 tty->print("@ %d|%d ", branch_pos, branch_sect); 268 if (branch_sect == CodeBuffer::SECT_CONSTS) { 269 tty->print_cr(PTR_FORMAT, *(address*)branch); 270 continue; 271 } 272 masm->pd_print_patched_instruction(branch); 273 tty->cr(); 274 } 275 } 276 #endif // ndef PRODUCT