1 /* 2 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "nativeInst_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// Rounds 'offset' up so that the region placed at that offset starts on a
// CodeEntryAlignment boundary.  The CodeHeap header size is added before
// rounding and subtracted afterwards because the alignment must hold for the
// absolute address (heap header + offset), not for the raw offset alone.
// CodeEntryAlignment is assumed to be a power of two (mask arithmetic).
unsigned int CodeBlob::align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  return
    ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))
    - (int)CodeHeap::header_size();
}


// Computes the total allocation size (in bytes) for a CodeBlob that will hold
// the contents of CodeBuffer 'cb' behind a blob header of 'header_size' bytes:
// header, relocation info, alignment padding, then content/oops/metadata,
// each rounded up to oopSize.
// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  unsigned int size = header_size;
  size += round_to(cb->total_relocation_size(), oopSize);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += round_to(cb->total_content_size(), oopSize);
  size += round_to(cb->total_oop_size(), oopSize);
  size += round_to(cb->total_metadata_size(), oopSize);
  return size;
}


// Creates a simple CodeBlob. Sets up the size of the different regions.
CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) {
  // All sizes must already be oop-aligned by the caller.
  assert(size == round_to(size, oopSize), "unaligned size");
  assert(locs_size == round_to(locs_size, oopSize), "unaligned size");
  assert(header_size == round_to(header_size, oopSize), "unaligned size");
  assert(!UseRelocIndex, "no space allocated for reloc index yet");

  // Note: If UseRelocIndex is enabled, there needs to be (at least) one
  // extra word for the relocation information, containing the reloc
  // index table length. Unfortunately, the reloc index table imple-
  // mentation is not easily understandable and thus it is not clear
  // what exactly the format is supposed to be. For now, we just turn
  // off the use of this table (gri 7/6/2000).

  _name                  = name;
  _size                  = size;
  _frame_complete_offset = frame_complete;
  _header_size           = header_size;
  _relocation_size       = locs_size;
  // Content starts after header + relocation info, aligned to CodeEntryAlignment.
  _content_offset        = align_code_offset(header_size + _relocation_size);
  // No separate instructions section: code coincides with content.
  _code_offset           = _content_offset;
  // No data region: data offset is placed at the very end of the blob.
  _data_offset           = size;
  _frame_size            = 0;       // simple blobs carry no frame
  set_oop_maps(NULL);               // and no oop maps
}


// Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions,
// and copy code and relocation info.
CodeBlob::CodeBlob(
  const char* name,
  CodeBuffer* cb,
  int header_size,
  int size,
  int frame_complete,
  int frame_size,
  OopMapSet* oop_maps
) {
  // Overall size and header size must already be oop-aligned by the caller.
  assert(size == round_to(size, oopSize), "unaligned size");
  assert(header_size == round_to(header_size, oopSize), "unaligned size");

  _name = name;
  _size = size;
  _frame_complete_offset = frame_complete;
  _header_size = header_size;
  // Blob layout: [header | relocation | alignment pad | content (insts) | data].
  // This must stay consistent with CodeBlob::allocation_size().
  _relocation_size = round_to(cb->total_relocation_size(), oopSize);
  _content_offset = align_code_offset(header_size + _relocation_size);
  _code_offset = _content_offset + cb->total_offset_of(cb->insts());
  _data_offset = _content_offset + round_to(cb->total_content_size(), oopSize);
  assert(_data_offset <= size, "codeBlob is too small");

  // Copy the generated code and relocation info out of the CodeBuffer.
  cb->copy_code_and_locs_to(this);
  set_oop_maps(oop_maps);
  _frame_size = frame_size;
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}


// Installs a deep copy of oop map set 'p', allocated on the C heap
// (released again in CodeBlob::flush()).  NULL clears the oop maps.
void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, its your job to free it.
  if (p != NULL) {
    // We need to allocate a chunk big enough to hold the OopMapSet and all of its OopMaps
    _oop_maps = (OopMapSet* )NEW_C_HEAP_ARRAY(unsigned char, p->heap_size(), mtCode);
    p->copy_to((address)_oop_maps);
  } else {
    _oop_maps = NULL;
  }
}


// Common bookkeeping for a freshly created stub blob: optional disassembly
// (-XX:+PrintStubCode), Forte registration, JVMTI dynamic-code-generated
// notification, and code cache memory usage tracking.  The stub id is the
// concatenation of 'name1' and 'name2'.  Must run with CodeCache_lock released.
void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != NULL) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
      Disassembler::decode(stub->code_begin(), stub->code_end());
      tty->cr();
    }
    Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      // JVMTI takes a single name; prefer name2 unless it is empty.
      const char* stub_name = name2;
      if (name2[0] == '\0')  stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}


// Releases C-heap resources owned by this blob: the copied oop map set
// (see set_oop_maps) and the attached code strings.
void CodeBlob::flush() {
  if (_oop_maps) {
    FREE_C_HEAP_ARRAY(unsigned char, _oop_maps, mtCode);
    _oop_maps = NULL;
  }
  _strings.free();
}


// Finds the oop map describing the frame state at 'return_address', given as
// an offset from the start of this blob's code section.
OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
  assert(oop_maps() != NULL, "nope");
  return oop_maps()->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}


//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob


// A BufferBlob with an (initially empty) code area of 'size' bytes and no
// relocation info or frame.
BufferBlob::BufferBlob(const char* name, int size)
: CodeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}

// Allocates a BufferBlob with room for 'buffer_size' code bytes in the code
// cache.  The non-throwing operator new may yield NULL, in which case the
// returned blob is NULL.
BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += round_to(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


// A BufferBlob whose contents are copied from CodeBuffer 'cb'.
BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
: CodeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
{}

// Allocates a BufferBlob sized to hold the contents of 'cb'.
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = allocation_size(cb, sizeof(BufferBlob));
  assert(name != NULL, "must provide a name");
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


// Placement-style allocator backed by the code cache.  Criticality is chosen
// by the caller via 'is_critical'; returns NULL on allocation failure
// (no fatal error, unlike RuntimeStub/SingletonBlob allocation).
void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
  void* p = CodeCache::allocate(size, is_critical);
  return p;
}


// Flushes 'blob' and returns its storage to the code cache.
void BufferBlob::free( BufferBlob *blob ) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->flush();
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free((CodeBlob*)blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
  BufferBlob("I2C/C2I adapters", size, cb) {
  // Adapter blobs are committed to the code cache immediately on construction.
  CodeCache::commit(this);
}

// Allocates an AdapterBlob (I2C/C2I adapters) sized to hold the contents
// of 'cb'.  Adapter allocation is critical: it may dip into
// CodeCacheMinimumFreeSpace rather than fail.
AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  AdapterBlob* blob = NULL;
  unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    // The parameter 'true' indicates a critical memory allocation.
    // This means that CodeCacheMinimumFreeSpace is used, if necessary
    const bool is_critical = true;
    blob = new (size, is_critical) AdapterBlob(size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, size) {
}

// Allocates a VtableBlob with room for 'buffer_size' code bytes in the code
// cache (non-critical allocation, same layout logic as BufferBlob::create).
VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  VtableBlob* blob = NULL;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += round_to(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) VtableBlob(name, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

// Allocates a MethodHandlesAdapterBlob with room for 'buffer_size' code
// bytes; the allocation is critical (see AdapterBlob::create).
MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = NULL;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += round_to(buffer_size, oopSize);
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    // The parameter 'true' indicates a critical memory allocation.
    // This means that CodeCacheMinimumFreeSpace is used, if necessary
    const bool is_critical = true;
    blob = new (size, is_critical) MethodHandlesAdapterBlob(size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int size,
  int frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments
)
: CodeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps)
{
  _caller_must_gc_arguments = caller_must_gc_arguments;
}


// Allocates a RuntimeStub holding the contents of 'cb' and registers it
// (disassembly/Forte/JVMTI) via trace_new_stub after the lock is released.
RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments)
{
  RuntimeStub* stub = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(RuntimeStub));
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


// Critical code cache allocation; failure to allocate a runtime stub is
// fatal (the VM cannot run without them).
void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, true);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}

// operator new shared by all singletons:
// critical allocation, fatal on failure, like RuntimeStub::operator new.
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, true);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size
)
: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
{
  // Record the entry-point offsets for the different deopt unpack modes.
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  // Not set here; filled in later when the C1-specific entry is generated.
  _unpack_with_exception_in_tls = -1;
#endif
}


// Allocates the (singleton) DeoptimizationBlob from the contents of 'cb'.
DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size)
{
  DeoptimizationBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(DeoptimizationBlob));
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

#ifdef COMPILER2
UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
{}


// Allocates the (singleton, C2-only) UncommonTrapBlob from 'cb'.
UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  UncommonTrapBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(UncommonTrapBlob));
    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

#ifdef COMPILER2
ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
{}


// Allocates the (singleton, C2-only) ExceptionBlob from 'cb'.
ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  ExceptionBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(ExceptionBlob));
    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
{}


// Allocates the (singleton) SafepointBlob from 'cb'.
SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  SafepointBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(SafepointBlob));
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Verification and printing

// Base CodeBlob instances are never verified directly; subclasses override.
void CodeBlob::verify() {
  ShouldNotReachHere();
}

void CodeBlob::print_on(outputStream* st) const {
  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print_value_on(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void BufferBlob::verify() {
  // unimplemented
}

void BufferBlob::print_on(outputStream* st) const {
  CodeBlob::print_on(st);
  print_value_on(st);
}

void BufferBlob::print_value_on(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT  ") used for %s", p2i(this), name());
}

void RuntimeStub::verify() {
  // unimplemented
}

// Prints header info followed by a disassembly of the stub's code.
void RuntimeStub::print_on(outputStream* st) const {
  ttyLocker ttyl;
  CodeBlob::print_on(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((CodeBlob*)this, st);
}

void RuntimeStub::print_value_on(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}

void SingletonBlob::verify() {
  // unimplemented
}

// Prints header info followed by a disassembly of the blob's code.
void SingletonBlob::print_on(outputStream* st) const {
  ttyLocker ttyl;
  CodeBlob::print_on(st);
  st->print_cr("%s", name());
  Disassembler::decode((CodeBlob*)this, st);
}

void SingletonBlob::print_value_on(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}