/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

unsigned int align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  return
    ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))
    - (int)CodeHeap::header_size();
}


// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  unsigned int size = header_size;
  size += round_to(cb->total_relocation_size(), oopSize);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += round_to(cb->total_content_size(), oopSize);
  size += round_to(cb->total_oop_size(), oopSize);
  size += round_to(cb->total_metadata_size(), oopSize);
  return size;
}


// Creates a simple CodeBlob. Sets up the size of the different regions.
CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) {
  assert(size        == round_to(size,        oopSize), "unaligned size");
  assert(locs_size   == round_to(locs_size,   oopSize), "unaligned size");
  assert(header_size == round_to(header_size, oopSize), "unaligned size");
  assert(!UseRelocIndex, "no space allocated for reloc index yet");

  // Note: If UseRelocIndex is enabled, there needs to be (at least) one
  //       extra word for the relocation information, containing the reloc
  //       index table length. Unfortunately, the reloc index table imple-
  //       mentation is not easily understandable and thus it is not clear
  //       what exactly the format is supposed to be. For now, we just turn
  //       off the use of this table (gri 7/6/2000).

  _name                  = name;
  _size                  = size;
  _frame_complete_offset = frame_complete;
  _header_size           = header_size;
  _relocation_size       = locs_size;
  _content_offset        = align_code_offset(header_size + _relocation_size);
  _code_offset           = _content_offset;
  _data_offset           = size;
  _frame_size            = 0;
  set_oop_maps(NULL);
}


// Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions,
// and copy code and relocation info.
CodeBlob::CodeBlob(
  const char* name,
  CodeBuffer* cb,
  int         header_size,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps
) {
  assert(size        == round_to(size,        oopSize), "unaligned size");
  assert(header_size == round_to(header_size, oopSize), "unaligned size");

  _name                  = name;
  _size                  = size;
  _frame_complete_offset = frame_complete;
  _header_size           = header_size;
  _relocation_size       = round_to(cb->total_relocation_size(), oopSize);
  _content_offset        = align_code_offset(header_size + _relocation_size);
  _code_offset           = _content_offset + cb->total_offset_of(cb->insts());
  _data_offset           = _content_offset + round_to(cb->total_content_size(), oopSize);
  assert(_data_offset <= size, "codeBlob is too small");

  cb->copy_code_and_locs_to(this);
  set_oop_maps(oop_maps);
  _frame_size = frame_size;
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}


void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, its your job to free it.
  if (p != NULL) {
    // We need to allocate a chunk big enough to hold the OopMapSet and all of its OopMaps
    _oop_maps = (OopMapSet* )NEW_C_HEAP_ARRAY(unsigned char, p->heap_size(), mtCode);
    p->copy_to((address)_oop_maps);
  } else {
    _oop_maps = NULL;
  }
}


void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != NULL) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
      Disassembler::decode(stub->code_begin(), stub->code_end());
      tty->cr();
    }
    Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0')  stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}


void CodeBlob::flush() {
  if (_oop_maps) {
    FREE_C_HEAP_ARRAY(unsigned char, _oop_maps, mtCode);
    _oop_maps = NULL;
  }
  _strings.free();
}


OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
  assert(oop_maps() != NULL, "nope");
  return oop_maps()->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}


//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob


BufferBlob::BufferBlob(const char* name, int size)
: CodeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}

BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += round_to(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
  : CodeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
{}

BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = allocation_size(cb, sizeof(BufferBlob));
  assert(name != NULL, "must provide a name");
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
  void* p = CodeCache::allocate(size, is_critical);
  return p;
}


void BufferBlob::free( BufferBlob *blob ) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->flush();
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free((CodeBlob*)blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
  BufferBlob("I2C/C2I adapters", size, cb) {
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  AdapterBlob* blob = NULL;
  unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    // The parameter 'true' indicates a critical memory allocation.
    // This means that CodeCacheMinimumFreeSpace is used, if necessary
    const bool is_critical = true;
    blob = new (size, is_critical) AdapterBlob(size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = NULL;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += round_to(buffer_size, oopSize);
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    // The parameter 'true' indicates a critical memory allocation.
    // This means that CodeCacheMinimumFreeSpace is used, if necessary
    const bool is_critical = true;
    blob = new (size, is_critical) MethodHandlesAdapterBlob(size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
)
: CodeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps)
{
  _caller_must_gc_arguments = caller_must_gc_arguments;
}


RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments)
{
  RuntimeStub* stub = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(RuntimeStub));
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, true);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, true);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size
)
: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size)
{
  DeoptimizationBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(DeoptimizationBlob));
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

#ifdef COMPILER2
UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  UncommonTrapBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(UncommonTrapBlob));
    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

#ifdef COMPILER2
ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  ExceptionBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(ExceptionBlob));
    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  SafepointBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(SafepointBlob));
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::verify() {
  ShouldNotReachHere();
}

void CodeBlob::print_on(outputStream* st) const {
  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print_value_on(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void BufferBlob::verify() {
  // unimplemented
}

void BufferBlob::print_on(outputStream* st) const {
  CodeBlob::print_on(st);
  print_value_on(st);
}

void BufferBlob::print_value_on(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}

void RuntimeStub::verify() {
  // unimplemented
}

void RuntimeStub::print_on(outputStream* st) const {
  ttyLocker ttyl;
  CodeBlob::print_on(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((CodeBlob*)this, st);
}

void RuntimeStub::print_value_on(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}

void SingletonBlob::verify() {
  // unimplemented
}

void SingletonBlob::print_on(outputStream* st) const {
  ttyLocker ttyl;
  CodeBlob::print_on(st);
  st->print_cr("%s", name());
  Disassembler::decode((CodeBlob*)this, st);
}

void SingletonBlob::print_value_on(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}