/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/valueKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Defs.hpp"
#endif
#ifdef COMPILER2
#include "opto/optoreg.hpp"
#endif

// OopMapStream

OopMapStream::OopMapStream(OopMap* oop_map, int oop_types_mask) {
  _stream = new CompressedReadStream(oop_map->write_stream()->buffer());
  _mask = oop_types_mask;
  _size = oop_map->omv_count();
  _position = 0;
  _valid_omv = false;
}

OopMapStream::OopMapStream(const ImmutableOopMap* oop_map, int oop_types_mask) {
  _stream = new CompressedReadStream(oop_map->data_addr());
  _mask = oop_types_mask;
  _size = oop_map->count();
  _position = 0;
  _valid_omv = false;
}

void OopMapStream::find_next() {
  while(_position++ < _size) {
    _omv.read_from(_stream);
    if(((int)_omv.type() & _mask) > 0) {
      _valid_omv = true;
      return;
    }
  }
  _valid_omv = false;
}
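
// Typical iteration over an (Immutable)OopMap uses the stream above, filtered
// by an oop_types_mask so only the requested entry kinds are decoded, e.g.:
//
//   for (OopMapStream oms(map, OopMapValue::oop_value); !oms.is_done(); oms.next()) {
//     OopMapValue omv = oms.current();
//     ...
//   }
//
// (Sketch only; the real call sites are in OopMapSet::all_do and in the
// printing code further down in this file.)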


// OopMap

// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
OopMap::OopMap(int frame_size, int arg_count) {
  // OopMaps are usually quite small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(32));
  set_omv_count(0);

#ifdef ASSERT
  _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
  _locs_used   = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif
}


OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
  // This constructor does a deep copy
  // of the source OopMap.
  set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
  set_omv_count(0);
  set_offset(source->offset());

#ifdef ASSERT
  _locs_length = source->_locs_length;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif

  // We need to copy the entries too.
  for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    omv.write_on(write_stream());
    increment_count();
  }
}


OopMap* OopMap::deep_copy() {
  return new OopMap(_deep_copy_token, this);
}

void OopMap::copy_data_to(address addr) const {
  memcpy(addr, write_stream()->buffer(), write_stream()->position());
}

int OopMap::heap_size() const {
  int size = sizeof(OopMap);
  int align = sizeof(void *) - 1;
  size += write_stream()->position();
  // Align to a reasonable ending point
  size = ((size+align) & ~align);
  return size;
}
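
// Example of the rounding above (assuming a 64-bit build, so align == 7):
// if sizeof(OopMap) plus the stream data comes to, say, 53 bytes, then
// (53 + 7) & ~7 == 56, i.e. the size is rounded up to the next multiple of
// the pointer size so the following object starts pointer-aligned.
// (The 53-byte figure is illustrative only.)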

// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {

  assert(reg->value() < _locs_length, "too big reg value for stack size");
  assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
  debug_only( _locs_used[reg->value()] = x; )

  OopMapValue o(reg, x);

  if(x == OopMapValue::callee_saved_value) {
    // This can never be a stack location, so we don't need to transform it.
    assert(optional->is_reg(), "Trying to callee save a stack location");
    o.set_content_reg(optional);
  } else if(x == OopMapValue::derived_oop_value) {
    o.set_content_reg(optional);
  }

  o.write_on(write_stream());
  increment_count();
}


void OopMap::set_oop(VMReg reg) {
  set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
}


void OopMap::set_value(VMReg reg) {
  // At this time, we don't need value entries in our OopMap.
}


void OopMap::set_narrowoop(VMReg reg) {
  set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}


void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
  set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
}


void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
  if( reg == derived_from_local_register ) {
    // Actually an oop; the derived value shares storage with its base.
    set_oop(reg);
  } else {
    set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
  }
}
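
// A derived pointer is an interior address computed from a base oop, e.g. a
// pointer into the middle of an array that the compiler keeps live across a
// safepoint.  Recording it as derived_oop_value (together with the location
// holding the base) lets the GC rebase it after the base object moves:
//
//   oop base    = ...;                 // tracked via set_oop(base_reg)
//   address in  = (address)base + 24;  // tracked via set_derived_oop(in_reg, base_reg)
//   // after GC moves 'base', 'in' must become new_base + 24
//
// (The offset and register names above are illustrative.)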

// OopMapSet

OopMapSet::OopMapSet() {
  set_om_size(MinOopMapAllocation);
  set_om_count(0);
  OopMap** temp = NEW_RESOURCE_ARRAY(OopMap*, om_size());
  set_om_data(temp);
}


void OopMapSet::grow_om_data() {
  int new_size = om_size() * 2;
  OopMap** new_data = NEW_RESOURCE_ARRAY(OopMap*, new_size);
  memcpy(new_data,om_data(),om_size() * sizeof(OopMap*));
  set_om_size(new_size);
  set_om_data(new_data);
}

void OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
  assert(om_size() != -1,"Cannot grow a fixed OopMapSet");

  if(om_count() >= om_size()) {
    grow_om_data();
  }
  map->set_offset(pc_offset);

#ifdef ASSERT
  if(om_count() > 0) {
    OopMap* last = at(om_count()-1);
    if (last->offset() == map->offset() ) {
      fatal("OopMap inserted twice");
    }
    if(last->offset() > map->offset()) {
      tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
                      om_count(),last->offset(),om_count()+1,map->offset());
    }
  }
#endif // ASSERT

  set(om_count(),map);
  increment_count();
}
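
// Sketch of how a code generator typically fills a set (illustrative only;
// the real call sites are in the compilers and the runtime stub generators):
//
//   OopMapSet* oop_maps = new OopMapSet();
//   OopMap*    map      = new OopMap(frame_size_in_slots, arg_count);
//   map->set_oop(VMRegImpl::stack2reg(slot));           // an oop spilled to the stack
//   map->set_callee_saved(save_location, saved_reg);    // VMRegs, illustrative names
//   oop_maps->add_gc_map(pc_offset_of_safepoint, map);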


int OopMapSet::heap_size() const {
  // The space we use
  int size = sizeof(OopMap);
  int align = sizeof(void *) - 1;
  size = ((size+align) & ~align);
  size += om_count() * sizeof(OopMap*);

  // Now add in the space needed for the individual OopMaps
  for(int i=0; i < om_count(); i++) {
    size += at(i)->heap_size();
  }
  // We don't need to align this, it will be naturally pointer aligned
  return size;
}


OopMap* OopMapSet::singular_oop_map() {
  guarantee(om_count() == 1, "Make sure we only have a single gc point");
  return at(0);
}


OopMap* OopMapSet::find_map_at_offset(int pc_offset) const {
  int i, len = om_count();
  assert( len > 0, "must have pointer maps" );

  // Scan through oopmaps. Stop when current offset is either equal or greater
  // than the one we are looking for.
  for( i = 0; i < len; i++) {
    if( at(i)->offset() >= pc_offset )
      break;
  }

  assert( i < len, "oopmap not found" );

  OopMap* m = at(i);
  assert( m->offset() == pc_offset, "oopmap not found" );
  return m;
}

static void add_derived_oop(oop* base, oop* derived) {
#if !defined(TIERED) && !INCLUDE_JVMCI
  COMPILER1_PRESENT(ShouldNotReachHere();)
#endif // !defined(TIERED) && !INCLUDE_JVMCI
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::add(derived, base);
#endif // COMPILER2_OR_JVMCI
}


#ifndef PRODUCT
static void trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
  // Print oopmap and regmap
  tty->print_cr("------ ");
  CodeBlob* cb = fr->cb();
  const ImmutableOopMapSet* maps = cb->oop_maps();
  const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
  map->print();
  if( cb->is_nmethod() ) {
    nmethod* nm = (nmethod*)cb;
    // native wrappers have no scope data; it is implied
    if (nm->is_native_method()) {
      tty->print("bci: 0 (native)");
    } else {
      ScopeDesc* scope  = nm->scope_desc_at(fr->pc());
      tty->print("bci: %d ",scope->bci());
    }
  }
  tty->cr();
  fr->print_on(tty);
  tty->print("     ");
  cb->print_value_on(tty);  tty->cr();
  reg_map->print();
  tty->print_cr("------ ");

}
#endif // PRODUCT

void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f) {
  // add derived oops to a table
  all_do(fr, reg_map, f, add_derived_oop, &do_nothing_cl);
}


void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
                       OopClosure* oop_fn, void derived_oop_fn(oop*, oop*),
                       OopClosure* value_fn) {
  CodeBlob* cb = fr->cb();
  assert(cb != NULL, "no codeblob");

  NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);)

  const ImmutableOopMapSet* maps = cb->oop_maps();
  const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
  assert(map != NULL, "no ptr map found");

  // handle derived pointers first (otherwise base pointer may be
  // changed before derived pointer offset has been collected)
  OopMapValue omv;
  {
    OopMapStream oms(map,OopMapValue::derived_oop_value);
    if (!oms.is_done()) {
#ifndef TIERED
      COMPILER1_PRESENT(ShouldNotReachHere();)
#if INCLUDE_JVMCI
      if (UseJVMCICompiler) {
        ShouldNotReachHere();
      }
#endif
#endif // !TIERED
      // Protect the operation on the derived pointers.  This
      // protects the addition of derived pointers to the shared
      // derived pointer table in DerivedPointerTable::add().
      MutexLockerEx x(DerivedPointerTableGC_lock, Mutex::_no_safepoint_check_flag);
      do {
        omv = oms.current();
        oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
        guarantee(loc != NULL, "missing saved register");
        oop *derived_loc = loc;
        oop *base_loc    = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
        // Ignore NULL oops and decoded NULL narrow oops which are
        // equal to Universe::narrow_oop_base when a narrow oop
        // implicit null check is used in compiled code.
        // The narrow_oop_base could be NULL or be the address
        // of the page below the heap, depending on compressed oops mode.
        if (base_loc != NULL && *base_loc != (oop)NULL && !Universe::is_narrow_oop_base(*base_loc)) {
          derived_oop_fn(base_loc, derived_loc);
        }
        oms.next();
      }  while (!oms.is_done());
    }
  }

  // We want coop and oop oop_types
  int mask = OopMapValue::oop_value | OopMapValue::narrowoop_value;
  {
    for (OopMapStream oms(map,mask); !oms.is_done(); oms.next()) {
      omv = oms.current();
      oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
      // It should be an error if no location can be found for a
      // register mentioned as containing an oop of some kind.  Maybe
      // this was allowed previously because value_value items might
      // be missing?
      guarantee(loc != NULL, "missing saved register");
      if ( omv.type() == OopMapValue::oop_value ) {
        oop val = *loc;
        if (val == (oop)NULL || Universe::is_narrow_oop_base(val)) {
          // Ignore NULL oops and decoded NULL narrow oops which are
          // equal to Universe::narrow_oop_base when a narrow oop
          // implicit null check is used in compiled code.
          // The narrow_oop_base could be NULL or be the address
          // of the page below the heap, depending on compressed oops mode.
          continue;
        }
#ifdef ASSERT
        // We cannot verify the oop here if we are using ZGC; the oop
        // will be bad in case we had a safepoint between a load and a
        // load barrier.
        if (!UseZGC &&
            ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
                (!Universe::heap()->is_in_or_null(*loc)))) {
          tty->print_cr("# Found non oop pointer.  Dumping state at failure");
          // try to dump out some helpful debugging information
          trace_codeblob_maps(fr, reg_map);
          omv.print();
          tty->print_cr("register r");
          omv.reg()->print();
          tty->print_cr("loc = %p *loc = %p\n", loc, (address)*loc);
          // do the real assert.
          assert(Universe::heap()->is_in_or_null(*loc), "found non oop pointer");
        }
#endif // ASSERT
        oop_fn->do_oop(loc);
      } else if ( omv.type() == OopMapValue::narrowoop_value ) {
        narrowOop *nl = (narrowOop*)loc;
#ifndef VM_LITTLE_ENDIAN
        VMReg vmReg = omv.reg();
        // Don't do this on SPARC float registers as they can be individually addressed
        if (!vmReg->is_stack() SPARC_ONLY(&& !vmReg->is_FloatRegister())) {
          // compressed oops in registers only take up 4 bytes of an
          // 8 byte register but they are in the wrong part of the
          // word so adjust loc to point at the right place.
          nl = (narrowOop*)((address)nl + 4);
        }
#endif
        oop_fn->do_oop(nl);
      }
    }
  }
}
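
// During a GC stack walk each compiled frame is visited roughly as sketched
// below (illustrative; the actual driver lives in the frame/thread iteration code):
//
//   OopMapSet::oops_do(&fr, &reg_map, &root_closure);
//
// which first records derived pointers in the DerivedPointerTable and then
// feeds every plain and narrow oop location to the closure.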


// Update callee-saved register info for the following frame
void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
  ResourceMark rm;
  CodeBlob* cb = fr->cb();
  assert(cb != NULL, "no codeblob");

  // Any reg might be saved by a safepoint handler (see generate_handler_blob).
  assert( reg_map->_update_for_id == NULL || fr->is_older(reg_map->_update_for_id),
         "already updated this map; do not 'update' it twice!" );
  debug_only(reg_map->_update_for_id = fr->id());

  // Check if caller must update oop argument
  assert((reg_map->include_argument_oops() ||
          !cb->caller_must_gc_arguments(reg_map->thread())),
         "include_argument_oops should already be set");

  // Scan through the oopmap and find the location of all callee-saved registers
  // (we do not update in place, since the info could be overwritten)

  address pc = fr->pc();
  const ImmutableOopMap* map  = cb->oop_map_for_return_address(pc);
  assert(map != NULL, "no ptr map found");
  DEBUG_ONLY(int nof_callee = 0;)

  for (OopMapStream oms(map, OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    VMReg reg = omv.content_reg();
    oop* loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
    reg_map->set_location(reg, (address) loc);
    DEBUG_ONLY(nof_callee++;)
  }

  // Check that runtime stubs save all callee-saved registers
#ifdef COMPILER2
  assert(cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() ||
         (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
         "must save all");
#endif // COMPILER2
}
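
// Stack walkers rely on this so that, once a frame has spilled a callee-saved
// register, older frames can still locate the caller's value: the RegisterMap
// remembers where each callee-saved VMReg was stored, and
// oopmapreg_to_location() consults it when decoding subsequent oop maps.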

//=============================================================================
// Non-Product code

#ifndef PRODUCT

bool ImmutableOopMap::has_derived_pointer() const {
#if !defined(TIERED) && !INCLUDE_JVMCI
  COMPILER1_PRESENT(return false);
#endif // !defined(TIERED) && !INCLUDE_JVMCI
#if COMPILER2_OR_JVMCI
  // There is a derived pointer if the stream, filtered for derived_oop_value
  // entries, is not already done.
  OopMapStream oms(this,OopMapValue::derived_oop_value);
  return !oms.is_done();
#else
  return false;
#endif // COMPILER2_OR_JVMCI
}

#endif //PRODUCT

// Printing code is present in product build for -XX:+PrintAssembly.

static
void print_register_type(OopMapValue::oop_types x, VMReg optional,
                         outputStream* st) {
  switch( x ) {
  case OopMapValue::oop_value:
    st->print("Oop");
    break;
  case OopMapValue::narrowoop_value:
    st->print("NarrowOop");
    break;
  case OopMapValue::callee_saved_value:
    st->print("Callers_");
    optional->print_on(st);
    break;
  case OopMapValue::derived_oop_value:
    st->print("Derived_oop_");
    optional->print_on(st);
    break;
  default:
    ShouldNotReachHere();
  }
}

void OopMapValue::print_on(outputStream* st) const {
  reg()->print_on(st);
  st->print("=");
  print_register_type(type(),content_reg(),st);
  st->print(" ");
}

void ImmutableOopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("ImmutableOopMap{");
  for(OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  st->print("}");
}

void OopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("OopMap{");
  for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  st->print("off=%d}", (int) offset());
}

void ImmutableOopMapSet::print_on(outputStream* st) const {
  const ImmutableOopMap* last = NULL;
  for (int i = 0; i < _count; ++i) {
    const ImmutableOopMapPair* pair = pair_at(i);
    const ImmutableOopMap* map = pair->get_from(this);
    if (map != last) {
      st->cr();
      map->print_on(st);
      st->print("pc offsets: ");
    }
    last = map;
    st->print("%d ", pair->pc_offset());
  }
}

void OopMapSet::print_on(outputStream* st) const {
  int i, len = om_count();

  st->print_cr("OopMapSet contains %d OopMaps\n",len);

  for( i = 0; i < len; i++) {
    OopMap* m = at(i);
    st->print_cr("#%d ",i);
    m->print_on(st);
    st->cr();
  }
}

bool OopMap::equals(const OopMap* other) const {
  if (other->_omv_count != _omv_count) {
    return false;
  }
  if (other->write_stream()->position() != write_stream()->position()) {
    return false;
  }
  if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
    return false;
  }
  return true;
}

const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
  ImmutableOopMapPair* pairs = get_pairs();

  int i;
  for (i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      break;
    }
  }
  ImmutableOopMapPair* last = &pairs[i];

  assert(last->pc_offset() == pc_offset, "oopmap not found");
  return last->get_from(this);
}

const ImmutableOopMap* ImmutableOopMapPair::get_from(const ImmutableOopMapSet* set) const {
  return set->oopmap_at_offset(_oopmap_offset);
}

ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap) : _count(oopmap->count()) {
  address addr = data_addr();
  oopmap->copy_data_to(addr);
}

#ifdef ASSERT
int ImmutableOopMap::nr_of_bytes() const {
  OopMapStream oms(this);

  while (!oms.is_done()) {
    oms.next();
  }
  return sizeof(ImmutableOopMap) + oms.stream_position();
}
#endif

ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(NULL), _last(NULL), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(NULL) {
  _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
}

int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
  return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
}

int ImmutableOopMapBuilder::heap_size() {
  int base = sizeof(ImmutableOopMapSet);
  base = align_up(base, 8);

  // all of our pc / offset pairs
  int pairs = _set->size() * sizeof(ImmutableOopMapPair);
  pairs = align_up(pairs, 8);

  for (int i = 0; i < _set->size(); ++i) {
    int size = 0;
    OopMap* map = _set->at(i);

    if (is_empty(map)) {
      /* only keep a single empty map in the set */
      if (has_empty()) {
        _mapping[i].set(Mapping::OOPMAP_EMPTY, _empty_offset, 0, map, _empty);
      } else {
        _empty_offset = _offset;
        _empty = map;
        size = size_for(map);
        _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      }
    } else if (is_last_duplicate(map)) {
      /* if this entry is identical to the previous one, just point it there */
      _mapping[i].set(Mapping::OOPMAP_DUPLICATE, _last_offset, 0, map, _last);
    } else {
      /* not empty, not an identical copy of the previous entry */
      size = size_for(map);
      _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      _last_offset = _offset;
      _last = map;
    }

    assert(_mapping[i]._map == map, "check");
    _offset += size;
  }

  int total = base + pairs + _offset;
  DEBUG_ONLY(total += 8);
  _required = total;
  return total;
}
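
// The blob laid out by the builder looks roughly like this (sections 8-byte
// aligned; an extra 8 guard bytes are appended in debug builds):
//
//   [ ImmutableOopMapSet header ]
//   [ ImmutableOopMapPair 0..n  ]   // (pc offset, map offset) pairs
//   [ ImmutableOopMap data ...  ]   // deduplicated: a single shared empty map,
//                                   // consecutive identical maps stored once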

void ImmutableOopMapBuilder::fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  assert(offset < set->nr_of_bytes(), "check");
  new ((address) pair) ImmutableOopMapPair(map->offset(), offset);
}

int ImmutableOopMapBuilder::fill_map(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  fill_pair(pair, map, offset, set);
  address addr = (address) pair->get_from(_new_set); // location of the ImmutableOopMap

  new (addr) ImmutableOopMap(map);
  return size_for(map);
}

void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
  ImmutableOopMapPair* pairs = set->get_pairs();

  for (int i = 0; i < set->count(); ++i) {
    const OopMap* map = _mapping[i]._map;
    ImmutableOopMapPair* pair = NULL;
    int size = 0;

    if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
      size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
    } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
      fill_pair(&pairs[i], map, _mapping[i]._offset, set);
    }

    const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
    assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
  }
}

#ifdef ASSERT
void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
  for (int i = 0; i < 8; ++i) {
    assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
  }

  for (int i = 0; i < set->count(); ++i) {
    const ImmutableOopMapPair* pair = set->pair_at(i);
    assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
    const ImmutableOopMap* map = pair->get_from(set);
    int nr_of_bytes = map->nr_of_bytes();
    assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
  }
}
#endif

ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {
  DEBUG_ONLY(memset(&buffer[_required-8], 0xff, 8));

  _new_set = new (buffer) ImmutableOopMapSet(_set, _required);
  fill(_new_set, _required);

  DEBUG_ONLY(verify(buffer, _required, _new_set));

  return _new_set;
}

ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
  _required = heap_size();

  // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
  address buffer = (address) NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode);
  return generate_into(buffer);
}

ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set) {
  ResourceMark mark;
  ImmutableOopMapBuilder builder(oopmap_set);
  return builder.build();
}
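
// Typical use (sketch): when compiled code is installed, the resource-allocated
// OopMapSet gathered during code emission is flattened into this immutable,
// C-heap form, e.g.
//
//   ImmutableOopMapSet* ioms = ImmutableOopMapSet::build_from(oop_maps);
//
// and this is what CodeBlob::oop_maps() later hands back to the GC.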


//------------------------------DerivedPointerTable---------------------------

#if COMPILER2_OR_JVMCI

class DerivedPointerEntry : public CHeapObj<mtCompiler> {
 private:
  oop*     _location; // Location of derived pointer (also pointing to the base)
  intptr_t _offset;   // Offset from base pointer
 public:
  DerivedPointerEntry(oop* location, intptr_t offset) { _location = location; _offset = offset; }
  oop* location()    { return _location; }
  intptr_t  offset() { return _offset; }
};


GrowableArray<DerivedPointerEntry*>* DerivedPointerTable::_list = NULL;
bool DerivedPointerTable::_active = false;


void DerivedPointerTable::clear() {
  // The first time, we create the list.  Otherwise it should be
  // empty.  If not, then we have probably forgotten to call
  // update_pointers after the last GC/Scavenge.
  assert (!_active, "should not be active");
  assert(_list == NULL || _list->length() == 0, "table not empty");
  if (_list == NULL) {
    _list = new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<DerivedPointerEntry*>(10, true); // Allocated on C heap
  }
  _active = true;
}
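
// Lifecycle of the table across a collection (sketch of the calling order,
// which lives in the GC/safepoint code rather than in this file):
//
//   DerivedPointerTable::clear();            // activate before scanning stacks
//   ... OopMapSet::oops_do() on each compiled frame calls add() ...
//   ... the GC moves objects, updating the base oops in place ...
//   DerivedPointerTable::update_pointers();  // recompute derived = base + offset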


// Returns value of location as an int
intptr_t value_of_loc(oop *pointer) { return cast_from_oop<intptr_t>((*pointer)); }


void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {
  assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop");
  assert(derived_loc != base_loc, "Base and derived in same location");
  if (_active) {
    assert(*derived_loc != (oop)base_loc, "location already added");
    assert(_list != NULL, "list must exist");
    intptr_t offset = value_of_loc(derived_loc) - value_of_loc(base_loc);
    // This assert is invalid because derived pointers can be
    // arbitrarily far away from their base.
    // assert(offset >= -1000000, "wrong derived pointer info");

    if (TraceDerivedPointers) {
      tty->print_cr(
        "Add derived pointer@" INTPTR_FORMAT
        " - Derived: " INTPTR_FORMAT
        " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
        p2i(derived_loc), p2i((address)*derived_loc), p2i((address)*base_loc), p2i(base_loc), offset
      );
    }
    // Set derived oop location to point to base.
    *derived_loc = (oop)base_loc;
    assert_lock_strong(DerivedPointerTableGC_lock);
    DerivedPointerEntry *entry = new DerivedPointerEntry(derived_loc, offset);
    _list->append(entry);
  }
}
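
// Worked example (addresses illustrative): if *base_loc is 0x1000 and the
// derived slot holds 0x1018, add() records offset 0x18 and temporarily
// rewrites the derived slot to point at base_loc itself.  After the GC has
// moved the base to, say, 0x2000, update_pointers() follows that indirection
// to read the new base and stores 0x2018 back into the derived slot.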


void DerivedPointerTable::update_pointers() {
  assert(_list != NULL, "list must exist");
  for(int i = 0; i < _list->length(); i++) {
    DerivedPointerEntry* entry = _list->at(i);
    oop* derived_loc = entry->location();
    intptr_t offset  = entry->offset();
    // The derived oop was set up to point to the location of the base
    oop  base        = **(oop**)derived_loc;
    assert(Universe::heap()->is_in_or_null(base), "must be an oop");

    *derived_loc = (oop)(((address)base) + offset);
    assert(value_of_loc(derived_loc) - value_of_loc(&base) == offset, "sanity check");

    if (TraceDerivedPointers) {
      tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
                    " - Derived: " INTPTR_FORMAT "  Base: " INTPTR_FORMAT " (Offset: " INTX_FORMAT ")",
          p2i(derived_loc), p2i((address)*derived_loc), p2i((address)base), offset);
    }

    // Delete entry
    delete entry;
    _list->at_put(i, NULL);
  }
  // Clear the list so it is ready for the next traversal (this is an invariant)
  if (TraceDerivedPointers && !_list->is_empty()) {
    tty->print_cr("--------------------------");
  }
  _list->clear();
  _active = false;
}

#endif // COMPILER2_OR_JVMCI