/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "compiler/disassembler.hpp"
#include "memory/gcLocker.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/icache.hpp"
#include "utilities/copy.hpp"
#include "utilities/xmlstream.hpp"

// The structure of a CodeSection:
//
//    _start ->           +----------------+
//                        | machine code...|
//    _end ->             |----------------|
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _limit ->           |                |
//
//    _locs_start ->      +----------------+
//                        |reloc records...|
//    _locs_end ->        |----------------|
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _locs_limit ->      |                |
//
// The _end (resp. _limit) pointer refers to the first
// unused (resp. unallocated) byte.

// The structure of the CodeBuffer while code is being accumulated:
//
//    _total_start ->    \
//    _insts._start ->    +----------------+
//                        |                |
//                        |     Code       |
//                        |                |
//    _stubs._start ->    |----------------|
//                        |                |
//                        |    Stubs       | (also handlers for deopt/exception)
//                        |                |
//    _consts._start ->   |----------------|
//                        |                |
//                        |   Constants    |
//                        |                |
//                        +----------------+
//    + _total_size ->    |                |
//
// When the code and relocations are copied to the code cache,
// the empty parts of each section are removed, and everything
// is copied into contiguous locations.

typedef CodeBuffer::csize_t csize_t;  // file-local definition

// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) {
  initialize_misc("static buffer");
  initialize(blob->content_begin(), blob->content_size());
  verify_section_allocation();
}

void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
  // Compute maximal alignment.
  int align = _insts.alignment();
  // Always allow for empty slop around each section.
  int slop = (int) CodeSection::end_slop();

  assert(blob() == NULL, "only once");
  set_blob(BufferBlob::create(_name, code_size + (align+slop) * (SECT_LIMIT+1)));
  if (blob() == NULL) {
    // The assembler constructor will throw a fatal on an empty CodeBuffer.
    return;  // caller must test this
  }

  // Set up various pointers into the blob.
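  // Note: this resolves to the two-argument (address, size) overload
  // (declared in codeBuffer.hpp), which parcels the blob's contents out
  // to the sections; it is not a recursive call into this method.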
  initialize(_total_start, _total_size);

  assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");

  pd_initialize();

  if (locs_size != 0) {
    _insts.initialize_locs(locs_size / sizeof(relocInfo));
  }

  verify_section_allocation();
}


CodeBuffer::~CodeBuffer() {
  verify_section_allocation();

  // If we allocated our code buffer from the CodeCache via a BufferBlob,
  // and it is not permanent, then free the BufferBlob.  The rest of the
  // memory will be freed when the ResourceObj is released.
  for (CodeBuffer* cb = this; cb != NULL; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }

  // free any overflow storage
  delete _overflow_arena;

#ifdef ASSERT
  // Save allocation type to execute assert in ~ResourceObj()
  // which is called after this destructor.
  assert(_default_oop_recorder.allocated_on_stack(), "should be embedded object");
  ResourceObj::allocation_type at = _default_oop_recorder.get_allocation_type();
  Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
  ResourceObj::set_allocation_type((address)(&_default_oop_recorder), at);
#endif
}

void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.freeze());  // force unused OR to be frozen
  _oop_recorder = r;
}

void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
  assert(cs != &_insts, "insts is the memory provider, not the consumer");
  csize_t slop = CodeSection::end_slop();  // margin between sections
  int align = cs->alignment();
  assert(is_power_of_2(align), "sanity");
  address start  = _insts._start;
  address limit  = _insts._limit;
  address middle = limit - size;
  middle -= (intptr_t)middle & (align-1);  // align the division point downward
  guarantee(middle - slop > start, "need enough space to divide up");
  _insts._limit = middle - slop;  // subtract desired space, plus slop
  cs->initialize(middle, limit - middle);
  assert(cs->start() == middle, "sanity");
  assert(cs->limit() == limit,  "sanity");
  // give it some relocations to start with, if the main section has them
  if (_insts.has_locs())  cs->initialize_locs(1);
}

void CodeBuffer::freeze_section(CodeSection* cs) {
  CodeSection* next_cs = (cs == consts())? NULL: code_section(cs->index()+1);
  csize_t frozen_size = cs->size();
  if (next_cs != NULL) {
    frozen_size = next_cs->align_at_start(frozen_size);
  }
  address old_limit = cs->limit();
  address new_limit = cs->start() + frozen_size;
  relocInfo* old_locs_limit = cs->locs_limit();
  relocInfo* new_locs_limit = cs->locs_end();
  // Patch the limits.
  cs->_limit = new_limit;
  cs->_locs_limit = new_locs_limit;
  cs->_frozen = true;
  if (next_cs != NULL && !next_cs->is_allocated() && !next_cs->is_frozen()) {
    // Give remaining buffer space to the following section.
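    // The successor inherits both the unused code space and the unused
    // relocation space.  initialize_shared_locs() below leaves _locs_own
    // false, so if the inherited locs storage ever needs to grow,
    // expand_locs() will copy it into freshly owned storage.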
    next_cs->initialize(new_limit, old_limit - new_limit);
    next_cs->initialize_shared_locs(new_locs_limit,
                                    old_locs_limit - new_locs_limit);
  }
}

void CodeBuffer::set_blob(BufferBlob* blob) {
  _blob = blob;
  if (blob != NULL) {
    address start = blob->content_begin();
    address end   = blob->content_end();
    // Round up the starting address.
    int align = _insts.alignment();
    start += (-(intptr_t)start) & (align-1);
    _total_start = start;
    _total_size  = end - start;
  } else {
#ifdef ASSERT
    // Clean out dangling pointers.
    _total_start    = badAddress;
    _consts._start  = _consts._end  = badAddress;
    _insts._start   = _insts._end   = badAddress;
    _stubs._start   = _stubs._end   = badAddress;
#endif //ASSERT
  }
}

void CodeBuffer::free_blob() {
  if (_blob != NULL) {
    BufferBlob::free(_blob);
    set_blob(NULL);
  }
}

const char* CodeBuffer::code_section_name(int n) {
#ifdef PRODUCT
  return NULL;
#else //PRODUCT
  switch (n) {
  case SECT_CONSTS:            return "consts";
  case SECT_INSTS:             return "insts";
  case SECT_STUBS:             return "stubs";
  default:                     return NULL;
  }
#endif //PRODUCT
}

int CodeBuffer::section_index_of(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr))  return n;
  }
  return SECT_NONE;
}

int CodeBuffer::locator(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) {
      return locator(addr - cs->start(), n);
    }
  }
  return -1;
}

address CodeBuffer::locator_address(int locator) const {
  if (locator < 0)  return NULL;
  address start = code_section(locator_sect(locator))->start();
  return start + locator_pos(locator);
}

bool CodeBuffer::is_backward_branch(Label& L) {
  return L.is_bound() && insts_end() <= locator_address(L.loc());
}

address CodeBuffer::decode_begin() {
  address begin = _insts.start();
  if (_decode_begin != NULL && _decode_begin > begin)
    begin = _decode_begin;
  return begin;
}


GrowableArray<int>* CodeBuffer::create_patch_overflow() {
  if (_overflow_arena == NULL) {
    _overflow_arena = new (mtCode) Arena(mtCode);
  }
  return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}


// Helper function for managing labels and their target addresses.
// Returns a sensible address, and if it is not the label's final
// address, notes the dependency (at 'branch_pc') on the label.
address CodeSection::target(Label& L, address branch_pc) {
  if (L.is_bound()) {
    int loc = L.loc();
    if (index() == CodeBuffer::locator_sect(loc)) {
      return start() + CodeBuffer::locator_pos(loc);
    } else {
      return outer()->locator_address(loc);
    }
  } else {
    assert(allocates2(branch_pc), "sanity");
    address base = start();
    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
    L.add_patch_at(outer(), patch_loc);

    // Need to return a pc, doesn't matter what it is since it will be
    // replaced during resolution later.
    // Don't return NULL or badAddress, since branches shouldn't overflow.
    // Don't return base either because that could overflow displacements
    // for shorter branches.  It will get checked when bound.
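    // Returning branch_pc itself yields a (near-)zero displacement, which
    // even the shortest branch encodings can represent until the label
    // is bound.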
    return branch_pc;
  }
}

void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none)  return;

  // The assertion below has been adjusted, to also work for
  // relocation for fixup.  Sometimes we want to put relocation
  // information for the next instruction, since it will be patched
  // with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // no space for relocation information provided => code cannot be
    // relocated.  Make sure that relocate is only called with rtypes
    // that can be ignored for this kind of code.
    assert(rtype == relocInfo::none              ||
           rtype == relocInfo::runtime_call_type ||
           rtype == relocInfo::internal_word_type||
           rtype == relocInfo::section_word_type ||
           rtype == relocInfo::external_word_type,
           "code needs relocation information");
    // leave behind an indication that we attempted a relocation
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // reload pointer
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = filler_relocInfo();
    offset -= filler_relocInfo().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}

void CodeSection::initialize_locs(int locs_capacity) {
  assert(_locs_start == NULL, "only one locs init step, please");
  // Apply a priori lower limits to relocation size:
  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
  if (locs_capacity < min_locs)  locs_capacity = min_locs;
  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
  _locs_start    = locs_start;
  _locs_end      = locs_start;
  _locs_limit    = locs_start + locs_capacity;
  _locs_own      = true;
}

void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
  assert(_locs_start == NULL, "do this before locs are allocated");
  // Internal invariant: locs buf must be fully aligned.
  // See copy_relocations_to() below.
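  // Skip unaligned leading entries one relocInfo at a time; relocInfo is
  // a small (16-bit) unit, so only a few entries can be dropped here.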
  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
    ++buf; --length;
  }
  if (length > 0) {
    _locs_start = buf;
    _locs_end   = buf;
    _locs_limit = buf + length;
    _locs_own   = false;
  }
}

void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}

void CodeSection::expand_locs(int new_capacity) {
  if (_locs_start == NULL) {
    initialize_locs(new_capacity);
    return;
  } else {
    int old_count    = locs_count();
    int old_capacity = locs_capacity();
    if (new_capacity < old_capacity * 2)
      new_capacity = old_capacity * 2;
    relocInfo* locs_start;
    if (_locs_own) {
      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
    } else {
      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
      _locs_own = true;
    }
    _locs_start    = locs_start;
    _locs_end      = locs_start + old_count;
    _locs_limit    = locs_start + new_capacity;
  }
}


/// Support for emitting the code to its final location.
/// The pattern is the same for all functions.
/// We iterate over all the sections, padding each to alignment.

csize_t CodeBuffer::total_content_size() const {
  csize_t size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    size_so_far = cs->align_at_start(size_so_far);
    size_so_far += cs->size();
  }
  return size_so_far;
}

void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_content_size(), "must be big enough");

  {
    // not sure why this is here, but why not...
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = NULL;
  CodeSection*       prev_dest_cs = NULL;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    csize_t csize = cs->size();

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (padding != 0) {
        buf_offset += padding;
        assert(prev_dest_cs != NULL, "sanity");
        prev_dest_cs->_limit += padding;
      }
#ifdef ASSERT
      if (prev_cs != NULL && prev_cs->is_frozen() && n < (SECT_LIMIT - 1)) {
        // Make sure the ends still match up.
        // This is important because a branch in a frozen section
        // might target code in a following section, via a Label,
        // and without a relocation record.  See Label::patch_instructions.
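        // The distance between the two section starts must therefore be
        // byte-for-byte identical in the source and destination layouts.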
        address dest_start = buf+buf_offset;
        csize_t start2start = cs->start() - prev_cs->start();
        csize_t dest_start2start = dest_start - prev_dest_cs->start();
        assert(start2start == dest_start2start, "cannot stretch frozen sect");
      }
#endif //ASSERT
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    debug_only(dest_cs->_start = NULL);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_content_size(), "sanity");
  dest->verify_section_allocation();
}

// Append an oop reference that keeps the class alive.
static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
  oop cl = k->klass_holder();
  if (cl != NULL && !oops->contains(cl)) {
    oops->append(cl);
  }
}

void CodeBuffer::finalize_oop_references(methodHandle mh) {
  No_Safepoint_Verifier nsv;

  GrowableArray<oop> oops;

  // Make sure that immediate metadata records something in the OopRecorder
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    RelocIterator iter(cs);
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation* md = iter.metadata_reloc();
        if (md->metadata_is_immediate()) {
          Metadata* m = md->metadata_value();
          if (oop_recorder()->is_real(m)) {
            if (m->is_methodData()) {
              m = ((MethodData*)m)->method();
            }
            if (m->is_method()) {
              m = ((Method*)m)->method_holder();
            }
            if (m->is_klass()) {
              append_oop_references(&oops, (Klass*)m);
            } else {
              // XXX This will currently occur for MDO which don't
              // have a backpointer.  This has to be fixed later.
              m->print();
              ShouldNotReachHere();
            }
          }
        }
      }
    }
  }

  if (!oop_recorder()->is_unused()) {
    for (int i = 0; i < oop_recorder()->metadata_count(); i++) {
      Metadata* m = oop_recorder()->metadata_at(i);
      if (oop_recorder()->is_real(m)) {
        if (m->is_methodData()) {
          m = ((MethodData*)m)->method();
        }
        if (m->is_method()) {
          m = ((Method*)m)->method_holder();
        }
        if (m->is_klass()) {
          append_oop_references(&oops, (Klass*)m);
        } else {
          m->print();
          ShouldNotReachHere();
        }
      }
    }
  }

  // Add the class loader of Method* for the nmethod itself
  append_oop_references(&oops, mh->method_holder());

  // Add any oops that we've found
  Thread* thread = Thread::current();
  for (int i = 0; i < oops.length(); i++) {
    oop_recorder()->find_index((jobject)thread->handle_area()->allocate_handle(oops.at(i)));
  }
}


csize_t CodeBuffer::total_offset_of(CodeSection* cs) const {
  csize_t size_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      size_so_far = cur_cs->align_at_start(size_so_far);
    }
    if (cur_cs->index() == cs->index()) {
      return size_so_far;
    }
    size_so_far += cur_cs->size();
  }
  ShouldNotReachHere();
  return -1;
}

csize_t CodeBuffer::total_relocation_size() const {
  csize_t lsize = copy_relocations_to(NULL);  // dry run only
  csize_t csize = total_content_size();
  csize_t total = RelocIterator::locs_and_index_size(csize, lsize);
  return (csize_t) align_size_up(total, HeapWordSize);
}

csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = NULL;
  csize_t buf_offset = 0;
  csize_t buf_limit = 0;
  if (dest != NULL) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
    assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
    assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");
  }
  // if dest == NULL, this is just the sizing pass

  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
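      // Each filler is a 'none'-type relocInfo whose only payload is an
      // address offset, so a run of fillers simply walks the shared
      // relocation point forward without describing any code.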
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = filler_relocInfo();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != NULL) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point; // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != NULL && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != NULL) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(code_end_so_far == total_content_size(), "sanity");

  // Account for index:
  if (buf != NULL) {
    RelocIterator::create_index(dest->relocation_begin(),
                                buf_offset / sizeof(relocInfo),
                                dest->relocation_end());
  }

  return buf_offset;
}

void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print();
  }
#endif //PRODUCT

  CodeBuffer dest(dest_blob);
  assert(dest_blob->content_size() >= total_content_size(), "good sizing");
  this->compute_final_layout(&dest);
  relocate_code_to(&dest);

  // transfer strings and comments from buffer to blob
  dest_blob->set_strings(_strings);

  // Done moving code bytes; were they the right size?
  assert(round_to(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
}

// Move all my code into another code buffer.  Consult applicable
// relocs to repair embedded addresses.  The layout in the destination
// CodeBuffer is different from the source CodeBuffer: the destination
// CodeBuffer gets the final layout (consts, insts, stubs in order of
// ascending address).
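// Note: expand() below also funnels through this routine when it moves
// the accumulated code into a larger temporary buffer.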
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  address dest_end = dest->_total_start + dest->_total_size;
  address dest_filled = NULL;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_size_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == NULL) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }
    // Keep track of the highest filled address
    dest_filled = MAX2(dest_filled, dest_cs->end() + dest_cs->remaining());

    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);
  }

  // Do relocation after all sections are copied.
  // This is necessary if the code uses constants in stubs, which are
  // relocated when the corresponding instruction in the code (e.g., a
  // call) is relocated.  Stubs are placed behind the main code
  // section, so that section has to be copied before relocating.
  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }

  if (dest->blob() == NULL && dest_filled != NULL) {
    // Destination is a final resting place, not just another buffer.
    // Normalize uninitialized bytes in the final padding.
    Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
                        Assembler::code_fill_byte());
  }
}

csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous section,
      // even if it's empty (e.g. consts section can be empty).
      // Cf. compute_final_layout.
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(n - 1 >= SECT_FIRST, "sanity");
        new_capacity[n - 1] += padding;
      }
    }

    csize_t exp = sect->size();  // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;  // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}

void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print();
  }

  if (StressCodeBuffers && blob() != NULL) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  {
    if (blob() == NULL)  return;  // caller must check for blob == NULL
    for (int n = 0; n < (int)SECT_LIMIT; n++) {
      guarantee(!code_section(n)->is_frozen(), "resizing not allowed when frozen");
    }
  }

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  if (cb.blob() == NULL) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = NULL);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
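  // Walk the sections from the highest index down: each non-insts section
  // is carved out of the top of the remaining _insts space by
  // initialize_section_size() above, so _insts keeps whatever is left.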
  for (int n = (int)SECT_LIMIT-1; n >= SECT_FIRST; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n != SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == NULL) {
      cb_sect->clear_mark();
    } else {
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(NULL);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal));

  _decode_begin = NULL;  // sanity

  // Make certain that the new sections are all snugly inside the new blob.
  verify_section_allocation();

#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print();
  }
#endif //PRODUCT
}

void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
  // Must already have disposed of the old blob somehow.
  assert(blob() == NULL, "must be empty");
  // Take the new blob away from cb.
  set_blob(cb->blob());
  // Take over all the section pointers.
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* cb_sect   = cb->code_section(n);
    CodeSection* this_sect = code_section(n);
    this_sect->take_over_code_from(cb_sect);
  }
  _overflow_arena = cb->_overflow_arena;
  // Make sure the old cb won't try to use it or free it.
  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}

void CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == badAddress)  return;  // smashed by set_blob(NULL)
  address tend   = tstart + _total_size;
  if (_blob != NULL) {
    guarantee(tstart >= _blob->content_begin(), "sanity");
    guarantee(tend   <= _blob->content_end(),   "sanity");
  }
  // Verify disjointness.
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated() || sect->is_empty())  continue;
    guarantee((intptr_t)sect->start() % sect->alignment() == 0
           || sect->is_empty() || _blob == NULL,
           "start is aligned");
    for (int m = (int) SECT_FIRST; m < (int) SECT_LIMIT; m++) {
      CodeSection* other = code_section(m);
      if (!other->is_allocated() || other == sect)  continue;
      guarantee(!other->contains(sect->start()    ), "sanity");
      // limit is an exclusive address and can be the start of another
      // section.
      guarantee(!other->contains(sect->limit() - 1), "sanity");
    }
    guarantee(sect->end() <= tend, "sanity");
    guarantee(sect->end() <= sect->limit(), "sanity");
  }
}

void CodeBuffer::log_section_sizes(const char* name) {
  if (xtty != NULL) {
    ttyLocker ttyl;
    // log info about buffer usage
    xtty->print_cr("<blob name='%s' size='%d'>", name, _total_size);
    for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
      CodeSection* sect = code_section(n);
      if (!sect->is_allocated() || sect->is_empty())  continue;
      xtty->print_cr("<sect index='%d' size='" SIZE_FORMAT "' free='" SIZE_FORMAT "'/>",
                     n, sect->limit() - sect->start(), sect->limit() - sect->end());
    }
    xtty->print_cr("</blob>");
  }
}

#ifndef PRODUCT

void CodeSection::dump() {
  address ptr = start();
  for (csize_t step; ptr < end(); ptr += step) {
    step = end() - ptr;
    if (step > jintSize * 4)  step = jintSize * 4;
    tty->print(INTPTR_FORMAT ": ", p2i(ptr));
    while (step > 0) {
      tty->print(" " PTR32_FORMAT, *(jint*)ptr);
      ptr  += jintSize;
      step -= jintSize;  // consume what was printed, so the loop terminates
    }
    tty->cr();
  }
}


void CodeSection::decode() {
  Disassembler::decode(start(), end());
}


void CodeBuffer::block_comment(intptr_t offset, const char * comment) {
  _strings.add_comment(offset, comment);
}

const char* CodeBuffer::code_string(const char* str) {
  return _strings.add_string(str);
}

class CodeString: public CHeapObj<mtCode> {
 private:
  friend class CodeStrings;
  const char * _string;
  CodeString*  _next;
  intptr_t     _offset;

  ~CodeString() {
    assert(_next == NULL, "wrong interface for freeing list");
    os::free((void*)_string, mtCode);
  }

  bool is_comment() const { return _offset >= 0; }

 public:
  CodeString(const char * string, intptr_t offset = -1)
    : _next(NULL), _offset(offset) {
    _string = os::strdup(string, mtCode);
  }

  const char * string() const { return _string; }
  intptr_t     offset() const { assert(_offset >= 0, "offset for non comment?"); return _offset; }
  CodeString*  next()   const { return _next; }

  void set_next(CodeString* next) { _next = next; }

  CodeString* first_comment() {
    if (is_comment()) {
      return this;
    } else {
      return next_comment();
    }
  }
  CodeString* next_comment() const {
    CodeString* s = _next;
    while (s != NULL && !s->is_comment()) {
      s = s->_next;
    }
    return s;
  }
};

CodeString* CodeStrings::find(intptr_t offset) const {
  CodeString* a = _strings->first_comment();
  while (a != NULL && a->offset() != offset) {
    a = a->next_comment();
  }
  return a;
}

// Convenience for add_comment.
CodeString* CodeStrings::find_last(intptr_t offset) const {
  CodeString* a = find(offset);
  if (a != NULL) {
    CodeString* c = NULL;
    while (((c = a->next_comment()) != NULL) && (c->offset() == offset)) {
      a = c;
    }
  }
  return a;
}

void CodeStrings::add_comment(intptr_t offset, const char * comment) {
  CodeString* c      = new CodeString(comment, offset);
  CodeString* inspos = (_strings == NULL) ? NULL : find_last(offset);

  if (inspos) {
    // insert after already existing comments with same offset
    c->set_next(inspos->next());
    inspos->set_next(c);
  } else {
    // no comments with such offset, yet. Insert before anything else.
    c->set_next(_strings);
    _strings = c;
  }
}

void CodeStrings::assign(CodeStrings& other) {
  _strings = other._strings;
}

void CodeStrings::print_block_comment(outputStream* stream, intptr_t offset) const {
  if (_strings != NULL) {
    CodeString* c = find(offset);
    while (c && c->offset() == offset) {
      stream->bol();
      stream->print("  ;; ");
      stream->print_cr("%s", c->string());
      c = c->next_comment();
    }
  }
}


void CodeStrings::free() {
  CodeString* n = _strings;
  while (n) {
    // unlink the node from the list saving a pointer to the next
    CodeString* p = n->next();
    n->set_next(NULL);
    delete n;
    n = p;
  }
  _strings = NULL;
}

const char* CodeStrings::add_string(const char * string) {
  CodeString* s = new CodeString(string);
  s->set_next(_strings);
  _strings = s;
  assert(s->string() != NULL, "should have a string");
  return s->string();
}

void CodeBuffer::decode() {
  ttyLocker ttyl;
  Disassembler::decode(decode_begin(), insts_end());
  _decode_begin = insts_end();
}


void CodeBuffer::skip_decode() {
  _decode_begin = insts_end();
}


void CodeBuffer::decode_all() {
  ttyLocker ttyl;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // dump contents of each section
    CodeSection* cs = code_section(n);
    tty->print_cr("! %s:", code_section_name(n));
    if (cs != consts())
      cs->decode();
    else
      cs->dump();
  }
}


void CodeSection::print(const char* name) {
  csize_t locs_size = locs_end() - locs_start();
  tty->print_cr(" %7s.code = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d)%s",
                name, p2i(start()), p2i(end()), p2i(limit()), size(), capacity(),
                is_frozen()? " [frozen]": "");
  tty->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d",
                name, p2i(locs_start()), p2i(locs_end()), p2i(locs_limit()), locs_size, locs_capacity(), locs_point_off());
  if (PrintRelocations) {
    RelocIterator iter(this);
    iter.print();
  }
}

void CodeBuffer::print() {
  if (this == NULL) {
    tty->print_cr("NULL CodeBuffer pointer");
    return;
  }

  tty->print_cr("CodeBuffer:");
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // print each section
    CodeSection* cs = code_section(n);
    cs->print(code_section_name(n));
  }
}

#endif // PRODUCT