1 /*
   2  * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 # include "incls/_precompiled.incl"
  26 # include "incls/_oopMapCache.cpp.incl"
  27 
  28 class OopMapCacheEntry: private InterpreterOopMap {
  29   friend class InterpreterOopMap;
  30   friend class OopMapForCacheEntry;
  31   friend class OopMapCache;
  32   friend class VerifyClosure;
  33 
  34  protected:
  35   // Initialization
  36   void fill(methodHandle method, int bci);
  37   // fills the bit mask for native calls
  38   void fill_for_native(methodHandle method);
  39   void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);
  40 
  41   // Deallocate bit masks and initialize fields
  42   void flush();
  43 
  44  private:
  45   void allocate_bit_mask();   // allocates the bit mask on the C heap if necessary
  46   void deallocate_bit_mask(); // deallocates the bit mask from the C heap if necessary
  47   bool verify_mask(CellTypeState *vars, CellTypeState *stack, int max_locals, int stack_top);
  48 
  49  public:
  50   OopMapCacheEntry() : InterpreterOopMap() {
  51 #ifdef ASSERT
  52      _resource_allocate_bit_mask = false;
  53 #endif
  54   }
  55 };
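     // Note on storage (summarizing the debug-only _resource_allocate_bit_mask
     // flag and the allocators used below): an OopMapCacheEntry owns any
     // out-of-line bit mask on the C heap, while a plain InterpreterOopMap used
     // as a lookup result gets its out-of-line bit mask from the current
     // thread's resource area. Masks no larger than small_mask_limit are stored
     // inline in _bit_mask and need no allocation at all.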
  56 
  57 
  58 // Implementation of OopMapForCacheEntry
  59 // (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci)
  60 
  61 class OopMapForCacheEntry: public GenerateOopMap {
  62   OopMapCacheEntry *_entry;
  63   int               _bci;
  64   int               _stack_top;
  65 
  66   virtual bool report_results() const     { return false; }
  67   virtual bool possible_gc_point          (BytecodeStream *bcs);
  68   virtual void fill_stackmap_prolog       (int nof_gc_points);
  69   virtual void fill_stackmap_epilog       ();
  70   virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
  71                                            CellTypeState* vars,
  72                                            CellTypeState* stack,
  73                                            int stack_top);
  74   virtual void fill_init_vars             (GrowableArray<intptr_t> *init_vars);
  75 
  76  public:
  77   OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry *entry);
  78 
  79   // Computes stack map for (method,bci) and initializes entry
  80   void compute_map(TRAPS);
  81   int  size();
  82 };
  83 
  84 
  85 OopMapForCacheEntry::OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) {
  86   _bci       = bci;
  87   _entry     = entry;
  88   _stack_top = -1;
  89 }
  90 
  91 
  92 void OopMapForCacheEntry::compute_map(TRAPS) {
  93   assert(!method()->is_native(), "cannot compute oop map for native methods");
  94   // First check if it is a method where the stackmap is always empty
  95   if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) {
  96     _entry->set_mask_size(0);
  97   } else {
  98     ResourceMark rm;
  99     GenerateOopMap::compute_map(CATCH);
 100     result_for_basicblock(_bci);
 101   }
 102 }
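     // compute_map() above runs GenerateOopMap's analysis over the whole method
     // and then asks for the state of the basic block containing _bci
     // (result_for_basicblock). The callbacks below keep only the state at that
     // one bci (see fill_stackmap_for_opcodes) and record it in the cache entry
     // via set_mask.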
 103 
 104 
 105 bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) {
 106   return false; // We are not reporting any result. We call result_for_basicblock directly
 107 }
 108 
 109 
 110 void OopMapForCacheEntry::fill_stackmap_prolog(int nof_gc_points) {
 111   // Do nothing
 112 }
 113 
 114 
 115 void OopMapForCacheEntry::fill_stackmap_epilog() {
 116   // Do nothing
 117 }
 118 
 119 
 120 void OopMapForCacheEntry::fill_init_vars(GrowableArray<intptr_t> *init_vars) {
 121   // Do nothing
 122 }
 123 
 124 
 125 void OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs,
 126                                                     CellTypeState* vars,
 127                                                     CellTypeState* stack,
 128                                                     int stack_top) {
 129   // Only interested in one specific bci
 130   if (bcs->bci() == _bci) {
 131     _entry->set_mask(vars, stack, stack_top);
 132     _stack_top = stack_top;
 133   }
 134 }
 135 
 136 
 137 int OopMapForCacheEntry::size() {
 138   assert(_stack_top != -1, "compute_map must be called first");
 139   return ((method()->is_static()) ? 0 : 1) + method()->max_locals() + _stack_top;
 140 }
 141 
 142 
 143 // Implementation of InterpreterOopMap and OopMapCacheEntry
 144 
 145 class VerifyClosure : public OffsetClosure {
 146  private:
 147   OopMapCacheEntry* _entry;
 148   bool              _failed;
 149 
 150  public:
 151   VerifyClosure(OopMapCacheEntry* entry)         { _entry = entry; _failed = false; }
 152   void offset_do(int offset)                     { if (!_entry->is_oop(offset)) _failed = true; }
 153   bool failed() const                            { return _failed; }
 154 };
 155 
 156 InterpreterOopMap::InterpreterOopMap() {
 157   initialize();
 158 #ifdef ASSERT
 159   _resource_allocate_bit_mask = true;
 160 #endif
 161 }
 162 
 163 InterpreterOopMap::~InterpreterOopMap() {
 164   // The expectation is that the bit mask was allocated
 165   // last in this resource area.  That would make the free of the
 166   // bit_mask effective (see how FREE_RESOURCE_ARRAY does a free).
 167   // If it was not allocated last, there is not a correctness problem
 168   // but the space for the bit_mask is not freed.
 169   assert(_resource_allocate_bit_mask, "Trying to free C heap space");
 170   if (mask_size() > small_mask_limit) {
 171     FREE_RESOURCE_ARRAY(uintptr_t, _bit_mask[0], mask_word_size());
 172   }
 173 }
 174 
 175 bool InterpreterOopMap::is_empty() {
 176   bool result = _method == NULL;
 177   assert(_method != NULL || (_bci == 0 &&
 178     (_mask_size == 0 || _mask_size == USHRT_MAX) &&
 179     _bit_mask[0] == 0), "Should be completely empty");
 180   return result;
 181 }
 182 
 183 void InterpreterOopMap::initialize() {
 184   _method    = NULL;
 185   _mask_size = USHRT_MAX;  // This value should cause a failure quickly
 186   _bci       = 0;
 187   _expression_stack_size = 0;
 188   for (int i = 0; i < N; i++) _bit_mask[i] = 0;
 189 }
 190 
 191 
 192 void InterpreterOopMap::oop_iterate(OopClosure *blk) {
 193   if (method() != NULL) {
 194     blk->do_oop((oop*) &_method);
 195   }
 196 }
 197 
 198 void InterpreterOopMap::oop_iterate(OopClosure *blk, MemRegion mr) {
 199   if (method() != NULL && mr.contains(&_method)) {
 200     blk->do_oop((oop*) &_method);
 201   }
 202 }
 203 
 204 
 205 
 206 void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) {
 207   int n = number_of_entries();
 208   int word_index = 0;
 209   uintptr_t value = 0;
 210   uintptr_t mask = 0;
 211   // iterate over entries
 212   for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
 213     // get current word
 214     if (mask == 0) {
 215       value = bit_mask()[word_index++];
 216       mask = 1;
 217     }
 218     // test for oop
 219     if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i);
 220   }
 221 }
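     // Bit mask layout, for illustration: each of the number_of_entries() cells
     // (locals first, then expression stack) occupies bits_per_entry bits,
     // packed from the low end of the words returned by bit_mask(). Assuming,
     // purely for the example, bits_per_entry == 1 and oop_bit_number == 0, a
     // frame with locals {oop, int, oop} and an empty stack is encoded as the
     // word 0b101, and iterate_oop above would report offsets 0 and 2.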
 222 
 223 void InterpreterOopMap::verify() {
 224   // If we are doing mark sweep _method may not have a valid header
 225   // $$$ This used to happen only for m/s collections; we might want to
 226   // think of an appropriate generalization of this distinction.
 227   guarantee(Universe::heap()->is_gc_active() || _method->is_oop_or_null(),
 228             "invalid oop in oopMapCache");
 229 }
 230 
 231 #ifdef ENABLE_ZAP_DEAD_LOCALS
 232 
 233 void InterpreterOopMap::iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure) {
 234   int n = number_of_entries();
 235   int word_index = 0;
 236   uintptr_t value = 0;
 237   uintptr_t mask = 0;
 238   // iterate over entries
 239   for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
 240     // get current word
 241     if (mask == 0) {
 242       value = bit_mask()[word_index++];
 243       mask = 1;
 244     }
 245     // test for dead values  & oops, and for live values
 246          if ((value & (mask << dead_bit_number)) != 0)  dead_closure->offset_do(i); // call this for all dead values or oops
 247     else if ((value & (mask <<  oop_bit_number)) != 0)   oop_closure->offset_do(i); // call this for all live oops
 248     else                                               value_closure->offset_do(i); // call this for all live values
 249   }
 250 }
 251 
 252 #endif
 253 
 254 
 255 void InterpreterOopMap::print() {
 256   int n = number_of_entries();
 257   tty->print("oop map for ");
 258   method()->print_value();
 259   tty->print(" @ %d = [%d] { ", bci(), n);
 260   for (int i = 0; i < n; i++) {
 261 #ifdef ENABLE_ZAP_DEAD_LOCALS
 262     if (is_dead(i)) tty->print("%d+ ", i);
 263     else
 264 #endif
 265     if (is_oop(i)) tty->print("%d ", i);
 266   }
 267   tty->print_cr("}");
 268 }
 269 
 270 class MaskFillerForNative: public NativeSignatureIterator {
 271  private:
 272   uintptr_t * _mask;                             // the bit mask to be filled
 273   int         _size;                             // the mask size in bits
 274 
 275   void set_one(int i) {
 276     i *= InterpreterOopMap::bits_per_entry;
 277     assert(0 <= i && i < _size, "offset out of bounds");
 278     _mask[i / BitsPerWord] |= (((uintptr_t) 1 << InterpreterOopMap::oop_bit_number) << (i % BitsPerWord));
 279   }
 280 
 281  public:
 282   void pass_int()                                { /* ignore */ }
 283   void pass_long()                               { /* ignore */ }
 284   void pass_float()                              { /* ignore */ }
 285   void pass_double()                             { /* ignore */ }
 286   void pass_object()                             { set_one(offset()); }
 287 
 288   MaskFillerForNative(methodHandle method, uintptr_t* mask, int size) : NativeSignatureIterator(method) {
 289     _mask   = mask;
 290     _size   = size;
 291     // initialize with 0
 292     int i = (size + BitsPerWord - 1) / BitsPerWord;
 293     while (i-- > 0) _mask[i] = 0;
 294   }
 295 
 296   void generate() {
 297     NativeSignatureIterator::iterate();
 298   }
 299 };
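     // Sketch of what MaskFillerForNative produces (hypothetical signature):
     // for a native method taking (int, Object, long, Object), pass_object() is
     // invoked for the two reference parameters only, so exactly their parameter
     // slots get the oop bit set; the int and long slots stay clear.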
 300 
 301 bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
 302   // Check mask includes map
 303   VerifyClosure blk(this);
 304   iterate_oop(&blk);
 305   if (blk.failed()) return false;
 306 
 307   // Check if map is generated correctly
 308   // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
 309   if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals);
 310 
 311   for(int i = 0; i < max_locals; i++) {
 312     bool v1 = is_oop(i)               ? true : false;
 313     bool v2 = vars[i].is_reference()  ? true : false;
 314     assert(v1 == v2, "locals oop mask generation error");
 315     if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
 316 #ifdef ENABLE_ZAP_DEAD_LOCALS
 317     bool v3 = is_dead(i)              ? true : false;
 318     bool v4 = !vars[i].is_live()      ? true : false;
 319     assert(v3 == v4, "locals live mask generation error");
 320     assert(!(v1 && v3), "dead value marked as oop");
 321 #endif
 322   }
 323 
 324   if (TraceOopMapGeneration && Verbose) { tty->cr(); tty->print("Stack (%d): ", stack_top); }
 325   for(int j = 0; j < stack_top; j++) {
 326     bool v1 = is_oop(max_locals + j)  ? true : false;
 327     bool v2 = stack[j].is_reference() ? true : false;
 328     assert(v1 == v2, "stack oop mask generation error");
 329     if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
 330 #ifdef ENABLE_ZAP_DEAD_LOCALS
 331     bool v3 = is_dead(max_locals + j) ? true : false;
 332     bool v4 = !stack[j].is_live()     ? true : false;
 333     assert(v3 == v4, "stack live mask generation error");
 334     assert(!(v1 && v3), "dead value marked as oop");
 335 #endif
 336   }
 337   if (TraceOopMapGeneration && Verbose) tty->cr();
 338   return true;
 339 }
 340 
 341 void OopMapCacheEntry::allocate_bit_mask() {
 342   if (mask_size() > small_mask_limit) {
 343     assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
 344     _bit_mask[0] = (intptr_t)
 345       NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size());
 346   }
 347 }
 348 
 349 void OopMapCacheEntry::deallocate_bit_mask() {
 350   if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
 351     assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
 352       "This bit mask should not be in the resource area");
 353     FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
 354     debug_only(_bit_mask[0] = 0;)
 355   }
 356 }
 357 
 358 
 359 void OopMapCacheEntry::fill_for_native(methodHandle mh) {
 360   assert(mh->is_native(), "method must be native method");
 361   set_mask_size(mh->size_of_parameters() * bits_per_entry);
 362   allocate_bit_mask();
 363   // fill mask for parameters
 364   MaskFillerForNative mf(mh, bit_mask(), mask_size());
 365   mf.generate();
 366 }
 367 
 368 
 369 void OopMapCacheEntry::fill(methodHandle method, int bci) {
 370   HandleMark hm;
 371   // Flush entry to deallocate an existing entry
 372   flush();
 373   set_method(method());
 374   set_bci(bci);
 375   if (method->is_native()) {
 376     // Native method activations have oops only among the parameters and one
 377     // extra oop following the parameters (the mirror for static native methods).
 378     fill_for_native(method);
 379   } else {
 380     EXCEPTION_MARK;
 381     OopMapForCacheEntry gen(method, bci, this);
 382     gen.compute_map(CATCH);
 383   }
 384   #ifdef ASSERT
 385     verify();
 386   #endif
 387 }
 388 
 389 
 390 void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int stack_top) {
 391   // compute bit mask size
 392   int max_locals = method()->max_locals();
 393   int n_entries = max_locals + stack_top;
 394   set_mask_size(n_entries * bits_per_entry);
 395   allocate_bit_mask();
 396   set_expression_stack_size(stack_top);
 397 
 398   // compute bits
 399   int word_index = 0;
 400   uintptr_t value = 0;
 401   uintptr_t mask = 1;
 402 
 403   CellTypeState* cell = vars;
 404   for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) {
 405     // store last word
 406     if (mask == 0) {
 407       bit_mask()[word_index++] = value;
 408       value = 0;
 409       mask = 1;
 410     }
 411 
 412     // switch to stack when done with locals
 413     if (entry_index == max_locals) {
 414       cell = stack;
 415     }
 416 
 417     // set oop bit
 418     if ( cell->is_reference()) {
 419       value |= (mask << oop_bit_number );
 420     }
 421 
 422   #ifdef ENABLE_ZAP_DEAD_LOCALS
 423     // set dead bit
 424     if (!cell->is_live()) {
 425       value |= (mask << dead_bit_number);
 426       assert(!cell->is_reference(), "dead value marked as oop");
 427     }
 428   #endif
 429   }
 430 
 431   // make sure last word is stored
 432   bit_mask()[word_index] = value;
 433 
 434   // verify bit mask
 435   assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");
 436 
 437 
 438 }
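     // The loop in set_mask above accumulates entries into 'value' and spills a
     // full word to bit_mask() each time 'mask' shifts off the end of the word
     // (mask == 0); the final, possibly partial, word is stored after the loop.
     // iterate_oop and iterate_all read the mask back with the mirror-image loop.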
 439 
 440 void OopMapCacheEntry::flush() {
 441   deallocate_bit_mask();
 442   initialize();
 443 }
 444 
 445 
 446 // Implementation of OopMapCache
 447 
 448 #ifndef PRODUCT
 449 
 450 static long _total_memory_usage = 0;
 451 
 452 long OopMapCache::memory_usage() {
 453   return _total_memory_usage;
 454 }
 455 
 456 #endif
 457 
 458 void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
 459   assert(_resource_allocate_bit_mask,
 460     "Should not resource allocate the _bit_mask");
 461   assert(from->method()->is_oop(), "MethodOop is bad");
 462 
 463   set_method(from->method());
 464   set_bci(from->bci());
 465   set_mask_size(from->mask_size());
 466   set_expression_stack_size(from->expression_stack_size());
 467 
 468   // Is the bit mask contained in the entry?
 469   if (from->mask_size() <= small_mask_limit) {
 470     memcpy((void *)_bit_mask, (void *)from->_bit_mask,
 471       mask_word_size() * BytesPerWord);
 472   } else {
 473     // The expectation is that this InterpreterOopMap was recently created
 474     // and is empty. It is used to get a copy of a cached entry.
 475     // If the bit mask has a value, it should be in the
 476     // resource area.
 477     assert(_bit_mask[0] == 0 ||
 478       Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
 479       "The bit mask should have been allocated from a resource area");
 480     // Allocate the bit_mask from a Resource area for performance.  Allocating
 481     // from the C heap as is done for OopMapCache has a significant
 482     // performance impact.
 483     _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size());
 484     assert(_bit_mask[0] != 0, "bit mask was not allocated");
 485     memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0],
 486       mask_word_size() * BytesPerWord);
 487   }
 488 }
 489 
 490 inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) {
 491   // We use method->code_size() rather than method->identity_hash() below since
 492   // the mark may not be present if a pointer to the method is already reversed.
 493   return   ((unsigned int) bci)
 494          ^ ((unsigned int) method->max_locals()         << 2)
 495          ^ ((unsigned int) method->code_size()          << 4)
 496          ^ ((unsigned int) method->size_of_parameters() << 6);
 497 }
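     // Worked example with hypothetical inputs: bci == 7, max_locals() == 3,
     // code_size() == 20 and size_of_parameters() == 2 give
     //   7 ^ (3 << 2) ^ (20 << 4) ^ (2 << 6) == 7 ^ 12 ^ 320 ^ 128 == 459;
     // entry_at() then reduces this probe value modulo _size.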
 498 
 499 
 500 OopMapCache::OopMapCache() :
 501   _mut(Mutex::leaf, "An OopMapCache lock", true)
 502 {
 503   _array  = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size);
 504   // Cannot call flush for initialization, since flush
 505   // will check if memory should be deallocated
 506   for(int i = 0; i < _size; i++) _array[i].initialize();
 507   NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
 508 }
 509 
 510 
 511 OopMapCache::~OopMapCache() {
 512   assert(_array != NULL, "sanity check");
 513   // Deallocate oop maps that are allocated out-of-line
 514   flush();
 515   // Deallocate array
 516   NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
 517   FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);
 518 }
 519 
 520 OopMapCacheEntry* OopMapCache::entry_at(int i) const {
 521   return &_array[i % _size];
 522 }
 523 
 524 void OopMapCache::flush() {
 525   for (int i = 0; i < _size; i++) _array[i].flush();
 526 }
 527 
 528 void OopMapCache::flush_obsolete_entries() {
 529   for (int i = 0; i < _size; i++)
 530     if (!_array[i].is_empty() && _array[i].method()->is_old()) {
 531       // Cache entry is occupied by an old redefined method and we don't want
 532       // to pin it down so flush the entry.
 533       RC_TRACE(0x08000000, ("flush: %s(%s): cached entry @%d",
 534         _array[i].method()->name()->as_C_string(),
 535         _array[i].method()->signature()->as_C_string(), i));
 536 
 537       _array[i].flush();
 538     }
 539 }
 540 
 541 void OopMapCache::oop_iterate(OopClosure *blk) {
 542   for (int i = 0; i < _size; i++) _array[i].oop_iterate(blk);
 543 }
 544 
 545 void OopMapCache::oop_iterate(OopClosure *blk, MemRegion mr) {
 546   for (int i = 0; i < _size; i++) _array[i].oop_iterate(blk, mr);
 547 }
 548 
 549 void OopMapCache::verify() {
 550   for (int i = 0; i < _size; i++) _array[i].verify();
 551 }
 552 
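     // lookup() below proceeds in three steps while holding _mut:
     //   1. probe up to _probe_depth consecutive slots for a (method, bci) match
     //      and copy the hit into entry_for;
     //   2. for methods that should_not_be_cached(), compute a one-off map and
     //      return without touching the table;
     //   3. otherwise fill the first empty probed slot, or, if the whole probe
     //      window is occupied, overwrite the slot at the probe base, and copy
     //      the result out.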
 553 void OopMapCache::lookup(methodHandle method,
 554                          int bci,
 555                          InterpreterOopMap* entry_for) {
 556   MutexLocker x(&_mut);
 557 
 558   OopMapCacheEntry* entry = NULL;
 559   int probe = hash_value_for(method, bci);
 560 
 561   // Search hashtable for match
 562   int i;
 563   for(i = 0; i < _probe_depth; i++) {
 564     entry = entry_at(probe + i);
 565     if (entry->match(method, bci)) {
 566       entry_for->resource_copy(entry);
 567       assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
 568       return;
 569     }
 570   }
 571 
 572   if (TraceOopMapGeneration) {
 573     static int count = 0;
 574     ResourceMark rm;
 575     tty->print("%d - Computing oopmap at bci %d for ", ++count, bci);
 576     method->print_value(); tty->cr();
 577   }
 578 
 579   // Entry is not in hashtable.
 580   // Compute entry and return it
 581 
 582   if (method->should_not_be_cached()) {
 583     // It is either not safe or not a good idea to cache this methodOop
 584     // at this time. We give the caller of lookup() a copy of the
 585     // interesting info via parameter entry_for, but we don't add it to
 586     // the cache. See the gory details in methodOop.cpp.
 587     compute_one_oop_map(method, bci, entry_for);
 588     return;
 589   }
 590 
 591   // First search for an empty slot
 592   for(i = 0; i < _probe_depth; i++) {
 593     entry  = entry_at(probe + i);
 594     if (entry->is_empty()) {
 595       entry->fill(method, bci);
 596       entry_for->resource_copy(entry);
 597       assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
 598       return;
 599     }
 600   }
 601 
 602   if (TraceOopMapGeneration) {
 603     ResourceMark rm;
 604     tty->print_cr("*** collision in oopmap cache - flushing item ***");
 605   }
 606 
 607   // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
 608   //entry_at(probe + _probe_depth - 1)->flush();
 609   //for(i = _probe_depth - 1; i > 0; i--) {
 610   //  // Copying entry[i] = entry[i-1];
 611   //  OopMapCacheEntry *to   = entry_at(probe + i);
 612   //  OopMapCacheEntry *from = entry_at(probe + i - 1);
 613   //  to->copy(from);
 614   // }
 615 
 616   assert(method->is_method(), "gaga");
 617 
 618   entry = entry_at(probe + 0);
 619   entry->fill(method, bci);
 620 
 621   // Copy the newly cached entry to the input parameter
 622   entry_for->resource_copy(entry);
 623 
 624   if (TraceOopMapGeneration) {
 625     ResourceMark rm;
 626     tty->print("Done with ");
 627     method->print_value(); tty->cr();
 628   }
 629   assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
 630 
 631   return;
 632 }
 633 
 634 void OopMapCache::compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry) {
 635   // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
 636   OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1);
 637   tmp->initialize();
 638   tmp->fill(method, bci);
 639   entry->resource_copy(tmp);
 640   FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp);
 641 }
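     // A minimal usage sketch (hypothetical caller; the real call sites live
     // elsewhere in the VM and obtain the cache from the method's holder):
     //
     //   ResourceMark rm;                       // resource_copy may allocate here
     //   InterpreterOopMap mask;
     //   cache->lookup(method, bci, &mask);     // 'cache' is an OopMapCache*
     //   mask.iterate_oop(&my_offset_closure);  // visit oop offsets at this bci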