/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"


// Implementation of AbstractAssembler
//
// The AbstractAssembler generates code into a CodeBuffer. To make code generation faster,
// the assembler keeps a copy of the code buffer's boundaries and modifies them when
// emitting bytes, rather than going through the code buffer's accessor functions all the time.
// The code buffer is updated via set_code_end(...) after emitting a whole instruction.
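//
// For example, an emit helper built on this scheme looks roughly like the
// following (an illustrative sketch only, not the actual HotSpot emit helpers,
// which live in the asm/ headers and the CPU-specific assemblers):
//
//   void emit_int32(int32_t x) {
//     address end = code_section()->end();  // cached end-of-code boundary
//     *(int32_t*)end = x;                   // write the bytes directly
//     set_code_end(end + sizeof(int32_t));  // publish the new boundary
//   }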

AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
  if (code == NULL) return;
  CodeSection* cs = code->insts();
  cs->clear_mark();   // new assembler kills old mark
  if (cs->start() == NULL) {
    vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "CodeCache: no room for %s", code->name());
  }
  _code_section = cs;
  _oop_recorder = code->oop_recorder();
  DEBUG_ONLY( _short_branch_delta = 0; )
}

void AbstractAssembler::set_code_section(CodeSection* cs) {
  assert(cs->outer() == code_section()->outer(), "sanity");
  assert(cs->is_allocated(), "need to pre-allocate this section");
  cs->clear_mark();  // new assembly into this section kills old mark
  _code_section = cs;
}

// Inform CodeBuffer that incoming code and relocation will be for stubs
address AbstractAssembler::start_a_stub(int required_space) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->stubs();
  assert(_code_section == cb->insts(), "not in insts?");
  if (cs->maybe_expand_to_ensure_remaining(required_space)
      && cb->blob() == NULL) {
    return NULL;
  }
  set_code_section(cs);
  return pc();
}

// Inform CodeBuffer that incoming code and relocation will be code.
// Should not be called if start_a_stub() returned NULL.
void AbstractAssembler::end_a_stub() {
  assert(_code_section == code()->stubs(), "not in stubs?");
  set_code_section(code()->insts());
}

// Inform CodeBuffer that incoming code and relocation will be for constants
address AbstractAssembler::start_a_const(int required_space, int required_align) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->consts();
  assert(_code_section == cb->insts() || _code_section == cb->stubs(), "not in insts/stubs?");
  address end = cs->end();
  // Pad up to the requested alignment before reserving the constant's space.
  int pad = -(intptr_t)end & (required_align-1);
  if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) {
    if (cb->blob() == NULL) return NULL;
    end = cs->end();  // refresh pointer
  }
  if (pad > 0) {
    while (--pad >= 0) { *end++ = 0; }
    cs->set_end(end);
  }
  set_code_section(cs);
  return end;
}

// Inform CodeBuffer that incoming code and relocation will be code
// in section cs (insts or stubs).
void AbstractAssembler::end_a_const(CodeSection* cs) {
  assert(_code_section == code()->consts(), "not in consts?");
  set_code_section(cs);
}

void AbstractAssembler::flush() {
  ICache::invalidate_range(addr_at(0), offset());
}

void AbstractAssembler::bind(Label& L) {
  if (L.is_bound()) {
    // Assembler can bind a label more than once to the same place.
    guarantee(L.loc() == locator(), "attempt to redefine label");
    return;
  }
  L.bind_loc(locator());
  L.patch_instructions((MacroAssembler*)this);
}
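
// Typical use of a Label from a platform MacroAssembler (an illustrative,
// x86-flavoured sketch; the concrete instructions are not part of this file):
//
//   Label done;
//   __ testptr(rax, rax);
//   __ jcc(Assembler::zero, done);  // forward branch; recorded via add_patch_at()
//   ...                             // code for the non-null case
//   __ bind(done);                  // binds the locator and patches pending branches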

void AbstractAssembler::generate_stack_overflow_check(int frame_size_in_bytes) {
  if (UseStackBanging) {
    // Each code entry causes one stack bang n pages down the stack, where n
    // is configurable via StackShadowPages. The setting depends on the maximum
    // depth of the VM and native call stacks before control returns to Java code,
    // since only Java code can raise a stack overflow exception using the
    // stack banging mechanism; the VM and native code do not detect stack
    // overflow.
    // The code in JavaCalls::call() checks that there are at least n pages
    // available, so all the entry code needs to do is bang once for the end of
    // this shadow zone.
    // The entry code may need to bang additional pages if the frame size
    // is greater than a page.

    const int page_size = os::vm_page_size();
    int bang_end = (int)JavaThread::stack_shadow_zone_size();

    // This is how far the previous frame's stack banging extended.
    const int bang_end_safe = bang_end;

    if (frame_size_in_bytes > page_size) {
      bang_end += frame_size_in_bytes;
    }

    int bang_offset = bang_end_safe;
    while (bang_offset <= bang_end) {
      // Need at least one stack bang at end of shadow zone.
      bang_stack_with_offset(bang_offset);
      bang_offset += page_size;
    }
  } // end (UseStackBanging)
}

void Label::add_patch_at(CodeBuffer* cb, int branch_loc, const char* file, int line) {
  assert(_loc == -1, "Label is unbound");
  // Don't add patch locations during scratch emit.
  if (cb->insts()->scratch_emit()) { return; }
  if (_patch_index < PatchCacheSize) {
    _patches[_patch_index] = branch_loc;
#ifdef ASSERT
    _lines[_patch_index] = line;
    _files[_patch_index] = file;
#endif
  } else {
    // The inline patch cache is full; spill further patch locations
    // into the overflow stack.
    if (_patch_overflow == NULL) {
      _patch_overflow = cb->create_patch_overflow();
    }
    _patch_overflow->push(branch_loc);
  }
  ++_patch_index;
}

void Label::patch_instructions(MacroAssembler* masm) {
  assert(is_bound(), "Label is bound");
  CodeBuffer* cb = masm->code();
  int target_sect = CodeBuffer::locator_sect(loc());
  address target = cb->locator_address(loc());
  while (_patch_index > 0) {
    --_patch_index;
    int branch_loc;
    int line = 0;
    const char* file = NULL;
    if (_patch_index >= PatchCacheSize) {
      branch_loc = _patch_overflow->pop();
    } else {
      branch_loc = _patches[_patch_index];
#ifdef ASSERT
      line = _lines[_patch_index];
      file = _files[_patch_index];
#endif
    }
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      // The thing to patch is a constant word.
      *(address*)branch = target;
      continue;
    }

#ifdef ASSERT
    // Cross-section branches only work if the
    // intermediate section boundaries are frozen.
    if (target_sect != branch_sect) {
      for (int n = MIN2(target_sect, branch_sect),
               nlimit = (target_sect + branch_sect) - n;
           n < nlimit; n++) {
        CodeSection* cs = cb->code_section(n);
        assert(cs->is_frozen(), "cross-section branch needs stable offsets");
      }
    }
#endif //ASSERT

    // Push the target offset into the branch instruction.
    masm->pd_patch_instruction(branch, target, file, line);
  }
}

void AbstractAssembler::block_comment(const char* comment) {
  if (sect() == CodeBuffer::SECT_INSTS) {
    code_section()->outer()->block_comment(offset(), comment);
  }
}

const char* AbstractAssembler::code_string(const char* str) {
  if (sect() == CodeBuffer::SECT_INSTS || sect() == CodeBuffer::SECT_STUBS) {
    return code_section()->outer()->code_string(str);
  }
  return NULL;
}

bool MacroAssembler::uses_implicit_null_check(void* address) {
  // Exception handler checks the nmethod's implicit null checks table
  // only when this method returns false.
  uintptr_t addr = reinterpret_cast<uintptr_t>(address);
  uintptr_t page_size = (uintptr_t)os::vm_page_size();
#ifdef _LP64
  if (UseCompressedOops && CompressedOops::base() != NULL) {
    // A SEGV can legitimately happen in C2 code at address
    // (heap_base + offset) if Matcher::narrow_oop_use_complex_address
    // is configured to allow narrow oop field loads to be implicitly
    // null checked.
    uintptr_t start = (uintptr_t)CompressedOops::base();
    uintptr_t end = start + page_size;
    if (addr >= start && addr < end) {
      return true;
    }
  }
#endif
  return addr < page_size;
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // The offset -1 is used (hardcoded) in a number of places in C1 and MacroAssembler
  // to indicate an unknown offset. For example, TemplateTable::pop_and_check_object(Register r)
  // calls MacroAssembler::null_check(Register reg, int offset = -1) which gets here
  // with -1. Another example is GraphBuilder::access_field(...) which uses -1 as a placeholder
  // for offsets to be patched in later. The -1 there means the offset is not yet known
  // and may lie outside of the zero-trapping page, so we must force an explicit
  // null check for -1.

  // Check if offset is outside of [0, os::vm_page_size())
  return offset < 0 || offset >= os::vm_page_size();
}
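
// For example, with a 4 KiB page: a field at offset 0x18 can rely on the
// hardware trap (0 <= 0x18 < 4096, so a load from NULL + 0x18 still hits the
// protected zero page), while offset -1 (unknown) or an offset of 0x2000 falls
// outside the zero-trapping page and needs an explicit compare-and-branch.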