/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"

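// A cache entry is an InterpreterOopMap whose bit mask, when it exceeds
// small_mask_limit, lives on the C heap (see allocate_bit_mask() and
// deallocate_bit_mask() below) rather than in a resource area.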
class OopMapCacheEntry: private InterpreterOopMap {
  friend class InterpreterOopMap;
  friend class OopMapForCacheEntry;
  friend class OopMapCache;
  friend class VerifyClosure;

 protected:
  // Initialization
  void fill(methodHandle method, int bci);
  // fills the bit mask for native calls
  void fill_for_native(methodHandle method);
  void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);

  // Deallocate bit masks and initialize fields
  void flush();

 private:
  void allocate_bit_mask();   // allocates the bit mask on the C heap if necessary
  void deallocate_bit_mask(); // deallocates the bit mask from the C heap if necessary
  bool verify_mask(CellTypeState *vars, CellTypeState *stack, int max_locals, int stack_top);

 public:
  OopMapCacheEntry() : InterpreterOopMap() {
#ifdef ASSERT
     _resource_allocate_bit_mask = false;
#endif
  }
};


// Implementation of OopMapForCacheEntry
// (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci)

class OopMapForCacheEntry: public GenerateOopMap {
  OopMapCacheEntry *_entry;
  int               _bci;
  int               _stack_top;

  virtual bool report_results() const     { return false; }
  virtual bool possible_gc_point          (BytecodeStream *bcs);
  virtual void fill_stackmap_prolog       (int nof_gc_points);
  virtual void fill_stackmap_epilog       ();
  virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
                                           CellTypeState* vars,
                                           CellTypeState* stack,
                                           int stack_top);
  virtual void fill_init_vars             (GrowableArray<intptr_t> *init_vars);

 public:
  OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry *entry);

  // Computes the stack map for (method,bci) and initializes the entry
  void compute_map(TRAPS);
  int  size();
};


OopMapForCacheEntry::OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) {
  _bci       = bci;
  _entry     = entry;
  _stack_top = -1;
}


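// compute_map() drives GenerateOopMap over the whole method;
// result_for_basicblock(_bci) then reports interpreter states through
// fill_stackmap_for_opcodes(), which records only the state at the
// requested bci (see below).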
void OopMapForCacheEntry::compute_map(TRAPS) {
  assert(!method()->is_native(), "cannot compute oop map for native methods");
  // First check if it is a method where the stackmap is always empty
  if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) {
    _entry->set_mask_size(0);
  } else {
    ResourceMark rm;
    GenerateOopMap::compute_map(CATCH);
    result_for_basicblock(_bci);
  }
}


bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) {
  return false; // We are not reporting any result. We call result_for_basicblock directly
}


void OopMapForCacheEntry::fill_stackmap_prolog(int nof_gc_points) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_epilog() {
  // Do nothing
}


void OopMapForCacheEntry::fill_init_vars(GrowableArray<intptr_t> *init_vars) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs,
                                                    CellTypeState* vars,
                                                    CellTypeState* stack,
                                                    int stack_top) {
  // Only interested in one specific bci
  if (bcs->bci() == _bci) {
    _entry->set_mask(vars, stack, stack_top);
    _stack_top = stack_top;
  }
}


int OopMapForCacheEntry::size() {
  assert(_stack_top != -1, "compute_map must be called first");
  return ((method()->is_static()) ? 0 : 1) + method()->max_locals() + _stack_top;
}


// Implementation of InterpreterOopMap and OopMapCacheEntry

class VerifyClosure : public OffsetClosure {
 private:
  OopMapCacheEntry* _entry;
  bool              _failed;

 public:
  VerifyClosure(OopMapCacheEntry* entry)         { _entry = entry; _failed = false; }
  void offset_do(int offset)                     { if (!_entry->is_oop(offset)) _failed = true; }
  bool failed() const                            { return _failed; }
};

InterpreterOopMap::InterpreterOopMap() {
  initialize();
#ifdef ASSERT
  _resource_allocate_bit_mask = true;
#endif
}

InterpreterOopMap::~InterpreterOopMap() {
  // The expectation is that the bit mask was allocated
  // last in this resource area.  That would make the free of the
  // bit_mask effective (see how FREE_RESOURCE_ARRAY does a free).
  // If it was not allocated last, there is not a correctness problem
  // but the space for the bit_mask is not freed.
  assert(_resource_allocate_bit_mask, "Trying to free C heap space");
  if (mask_size() > small_mask_limit) {
    FREE_RESOURCE_ARRAY(uintptr_t, _bit_mask[0], mask_word_size());
  }
}

bool InterpreterOopMap::is_empty() {
  bool result = _method == NULL;
  assert(_method != NULL || (_bci == 0 &&
    (_mask_size == 0 || _mask_size == USHRT_MAX) &&
    _bit_mask[0] == 0), "Should be completely empty");
  return result;
}

void InterpreterOopMap::initialize() {
  _method    = NULL;
  _mask_size = USHRT_MAX;  // This value should cause a failure quickly
  _bci       = 0;
  _expression_stack_size = 0;
  for (int i = 0; i < N; i++) _bit_mask[i] = 0;
}


void InterpreterOopMap::oop_iterate(OopClosure *blk) {
  if (method() != NULL) {
    blk->do_oop((oop*) &_method);
  }
}

void InterpreterOopMap::oop_iterate(OopClosure *blk, MemRegion mr) {
  if (method() != NULL && mr.contains(&_method)) {
    blk->do_oop((oop*) &_method);
  }
}


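// The bit mask stores bits_per_entry bits for each local/expression-stack slot.
// The loop below walks those packed entries: whenever `mask` shifts out of the
// current word (becomes 0), the next word of the mask is loaded, and the
// entry's oop bit (bit oop_bit_number) decides whether the closure is called.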
void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for oop
    if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i);
  }
}

void InterpreterOopMap::verify() {
  // If we are doing mark sweep _method may not have a valid header
  // $$$ This used to happen only for m/s collections; we might want to
  // think of an appropriate generalization of this distinction.
  guarantee(Universe::heap()->is_gc_active() || _method->is_oop_or_null(),
            "invalid oop in oopMapCache");
}

#ifdef ENABLE_ZAP_DEAD_LOCALS

void InterpreterOopMap::iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure) {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for dead values & oops, and for live values
         if ((value & (mask << dead_bit_number)) != 0)  dead_closure->offset_do(i); // call this for all dead values or oops
    else if ((value & (mask <<  oop_bit_number)) != 0)   oop_closure->offset_do(i); // call this for all live oops
    else                                               value_closure->offset_do(i); // call this for all live values
  }
}

#endif


void InterpreterOopMap::print() {
  int n = number_of_entries();
  tty->print("oop map for ");
  method()->print_value();
  tty->print(" @ %d = [%d] { ", bci(), n);
  for (int i = 0; i < n; i++) {
#ifdef ENABLE_ZAP_DEAD_LOCALS
    if (is_dead(i)) tty->print("%d+ ", i);
    else
#endif
    if (is_oop(i)) tty->print("%d ", i);
  }
  tty->print_cr("}");
}

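// MaskFillerForNative sets the oop bit for each object parameter of a native
// method as the signature iterator walks the parameters; entries for
// primitive parameters are left zero.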
class MaskFillerForNative: public NativeSignatureIterator {
 private:
  uintptr_t * _mask;                             // the bit mask to be filled
  int         _size;                             // the mask size in bits

  void set_one(int i) {
    i *= InterpreterOopMap::bits_per_entry;
    assert(0 <= i && i < _size, "offset out of bounds");
    _mask[i / BitsPerWord] |= (((uintptr_t) 1 << InterpreterOopMap::oop_bit_number) << (i % BitsPerWord));
  }

 public:
  void pass_int()                                { /* ignore */ }
  void pass_long()                               { /* ignore */ }
  void pass_float()                              { /* ignore */ }
  void pass_double()                             { /* ignore */ }
  void pass_object()                             { set_one(offset()); }

  MaskFillerForNative(methodHandle method, uintptr_t* mask, int size) : NativeSignatureIterator(method) {
    _mask   = mask;
    _size   = size;
    // initialize with 0
    int i = (size + BitsPerWord - 1) / BitsPerWord;
    while (i-- > 0) _mask[i] = 0;
  }

  void generate() {
    NativeSignatureIterator::iterate();
  }
};

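// Debug check (called from an assert in set_mask): cross-checks the packed
// bit mask against the CellTypeState arrays produced by the oop map generator.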
bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
  // Check mask includes map
  VerifyClosure blk(this);
  iterate_oop(&blk);
  if (blk.failed()) return false;

  // Check if map is generated correctly
  // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
  if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals);

  for(int i = 0; i < max_locals; i++) {
    bool v1 = is_oop(i)               ? true : false;
    bool v2 = vars[i].is_reference()  ? true : false;
    assert(v1 == v2, "locals oop mask generation error");
    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
#ifdef ENABLE_ZAP_DEAD_LOCALS
    bool v3 = is_dead(i)              ? true : false;
    bool v4 = !vars[i].is_live()      ? true : false;
    assert(v3 == v4, "locals live mask generation error");
    assert(!(v1 && v3), "dead value marked as oop");
#endif
  }

  if (TraceOopMapGeneration && Verbose) { tty->cr(); tty->print("Stack (%d): ", stack_top); }
  for(int j = 0; j < stack_top; j++) {
    bool v1 = is_oop(max_locals + j)  ? true : false;
    bool v2 = stack[j].is_reference() ? true : false;
    assert(v1 == v2, "stack oop mask generation error");
    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
#ifdef ENABLE_ZAP_DEAD_LOCALS
    bool v3 = is_dead(max_locals + j) ? true : false;
    bool v4 = !stack[j].is_live()     ? true : false;
    assert(v3 == v4, "stack live mask generation error");
    assert(!(v1 && v3), "dead value marked as oop");
#endif
  }
  if (TraceOopMapGeneration && Verbose) tty->cr();
  return true;
}

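// Bit masks of at most small_mask_limit bits are stored inline in the
// _bit_mask array; larger masks are allocated on the C heap, with
// _bit_mask[0] holding the pointer.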
void OopMapCacheEntry::allocate_bit_mask() {
  if (mask_size() > small_mask_limit) {
    assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
    _bit_mask[0] = (intptr_t)
      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size());
  }
}

void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "This bit mask should not be in the resource area");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
    debug_only(_bit_mask[0] = 0;)
  }
}


void OopMapCacheEntry::fill_for_native(methodHandle mh) {
  assert(mh->is_native(), "method must be native method");
  set_mask_size(mh->size_of_parameters() * bits_per_entry);
  allocate_bit_mask();
  // fill mask for parameters
  MaskFillerForNative mf(mh, bit_mask(), mask_size());
  mf.generate();
}


void OopMapCacheEntry::fill(methodHandle method, int bci) {
  HandleMark hm;
  // Flush entry to deallocate an existing entry
  flush();
  set_method(method());
  set_bci(bci);
  if (method->is_native()) {
    // Native method activations have oops only among the parameters and one
    // extra oop following the parameters (the mirror for static native methods).
    fill_for_native(method);
  } else {
    EXCEPTION_MARK;
    OopMapForCacheEntry gen(method, bci, this);
    gen.compute_map(CATCH);
  }
  #ifdef ASSERT
    verify();
  #endif
}


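// Packs the generator's CellTypeState for the locals followed by the
// expression stack into the bit mask, bits_per_entry bits per slot, flushing
// the accumulated word to bit_mask() each time `mask` wraps around to zero.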
void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int stack_top) {
  // compute bit mask size
  int max_locals = method()->max_locals();
  int n_entries = max_locals + stack_top;
  set_mask_size(n_entries * bits_per_entry);
  allocate_bit_mask();
  set_expression_stack_size(stack_top);

  // compute bits
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 1;

  CellTypeState* cell = vars;
  for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) {
    // store last word
    if (mask == 0) {
      bit_mask()[word_index++] = value;
      value = 0;
      mask = 1;
    }

    // switch to stack when done with locals
    if (entry_index == max_locals) {
      cell = stack;
    }

    // set oop bit
    if ( cell->is_reference()) {
      value |= (mask << oop_bit_number );
    }

  #ifdef ENABLE_ZAP_DEAD_LOCALS
    // set dead bit
    if (!cell->is_live()) {
      value |= (mask << dead_bit_number);
      assert(!cell->is_reference(), "dead value marked as oop");
    }
  #endif
  }

  // make sure last word is stored
  bit_mask()[word_index] = value;

  // verify bit mask
  assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");
}

void OopMapCacheEntry::flush() {
  deallocate_bit_mask();
  initialize();
}



// Implementation of OopMapCache

#ifndef PRODUCT

static long _total_memory_usage = 0;

long OopMapCache::memory_usage() {
  return _total_memory_usage;
}

#endif

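// Copies a cached OopMapCacheEntry into this resource-allocated
// InterpreterOopMap, so callers of OopMapCache::lookup() get their own copy
// rather than a pointer into the cache.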
void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
  assert(_resource_allocate_bit_mask,
    "Should not resource allocate the _bit_mask");
  assert(from->method()->is_oop(), "MethodOop is bad");

  set_method(from->method());
  set_bci(from->bci());
  set_mask_size(from->mask_size());
  set_expression_stack_size(from->expression_stack_size());

  // Is the bit mask contained in the entry?
  if (from->mask_size() <= small_mask_limit) {
    memcpy((void *)_bit_mask, (void *)from->_bit_mask,
      mask_word_size() * BytesPerWord);
  } else {
    // The expectation is that this InterpreterOopMap is recently created
    // and empty. It is used to get a copy of a cached entry.
    // If the bit mask has a value, it should be in the
    // resource area.
    assert(_bit_mask[0] == 0 ||
      Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "The bit mask should have been allocated from a resource area");
    // Allocate the bit_mask from a Resource area for performance.  Allocating
    // from the C heap as is done for OopMapCache has a significant
    // performance impact.
    _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size());
    assert(_bit_mask[0] != 0, "bit mask was not allocated");
    memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0],
      mask_word_size() * BytesPerWord);
  }
}

inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) {
  // We use method->code_size() rather than method->identity_hash() below since
  // the mark may not be present if a pointer to the method is already reversed.
  return   ((unsigned int) bci)
         ^ ((unsigned int) method->max_locals()         << 2)
         ^ ((unsigned int) method->code_size()          << 4)
         ^ ((unsigned int) method->size_of_parameters() << 6);
}


OopMapCache::OopMapCache() :
  _mut(Mutex::leaf, "An OopMapCache lock", true)
{
  _array  = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size);
  // Cannot call flush for initialization, since flush
  // will check if memory should be deallocated
  for(int i = 0; i < _size; i++) _array[i].initialize();
  NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
}


OopMapCache::~OopMapCache() {
  assert(_array != NULL, "sanity check");
  // Deallocate oop maps that are allocated out-of-line
  flush();
  // Deallocate array
  NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);
}

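// Probe indices are taken modulo _size, so probes past the end of the table
// wrap around to the beginning.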
OopMapCacheEntry* OopMapCache::entry_at(int i) const {
  return &_array[i % _size];
}

void OopMapCache::flush() {
  for (int i = 0; i < _size; i++) _array[i].flush();
}

void OopMapCache::flush_obsolete_entries() {
  for (int i = 0; i < _size; i++)
    if (!_array[i].is_empty() && _array[i].method()->is_old()) {
      // Cache entry is occupied by an old redefined method and we don't want
      // to pin it down so flush the entry.
      RC_TRACE(0x08000000, ("flush: %s(%s): cached entry @%d",
        _array[i].method()->name()->as_C_string(),
        _array[i].method()->signature()->as_C_string(), i));

      _array[i].flush();
    }
}

void OopMapCache::oop_iterate(OopClosure *blk) {
  for (int i = 0; i < _size; i++) _array[i].oop_iterate(blk);
}

void OopMapCache::oop_iterate(OopClosure *blk, MemRegion mr) {
  for (int i = 0; i < _size; i++) _array[i].oop_iterate(blk, mr);
}

void OopMapCache::verify() {
  for (int i = 0; i < _size; i++) _array[i].verify();
}

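// Looks up the oop map for (method, bci): hashes the pair, then linearly
// probes up to _probe_depth consecutive entries for a match. On a miss the
// map is computed; it is stored in an empty slot if one is found in the probe
// sequence, otherwise it overwrites the first probed slot, and methods that
// should not be cached bypass the cache entirely. The result is always
// handed back to the caller via resource_copy into entry_for.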
void OopMapCache::lookup(methodHandle method,
                         int bci,
                         InterpreterOopMap* entry_for) {
  MutexLocker x(&_mut);

  OopMapCacheEntry* entry = NULL;
  int probe = hash_value_for(method, bci);

  // Search hashtable for match
  int i;
  for(i = 0; i < _probe_depth; i++) {
    entry = entry_at(probe + i);
    if (entry->match(method, bci)) {
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      return;
    }
  }

  if (TraceOopMapGeneration) {
    static int count = 0;
    ResourceMark rm;
    tty->print("%d - Computing oopmap at bci %d for ", ++count, bci);
    method->print_value(); tty->cr();
  }

  // Entry is not in hashtable.
  // Compute entry and return it

  if (method->should_not_be_cached()) {
    // It is either not safe or not a good idea to cache this methodOop
    // at this time. We give the caller of lookup() a copy of the
    // interesting info via parameter entry_for, but we don't add it to
    // the cache. See the gory details in methodOop.cpp.
    compute_one_oop_map(method, bci, entry_for);
    return;
  }

  // First search for an empty slot
  for(i = 0; i < _probe_depth; i++) {
    entry  = entry_at(probe + i);
    if (entry->is_empty()) {
      entry->fill(method, bci);
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      return;
    }
  }

  if (TraceOopMapGeneration) {
    ResourceMark rm;
    tty->print_cr("*** collision in oopmap cache - flushing item ***");
  }

  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
  //entry_at(probe + _probe_depth - 1)->flush();
  //for(i = _probe_depth - 1; i > 0; i--) {
  //  // Copying entry[i] = entry[i-1];
  //  OopMapCacheEntry *to   = entry_at(probe + i);
  //  OopMapCacheEntry *from = entry_at(probe + i - 1);
  //  to->copy(from);
  // }

  assert(method->is_method(), "gaga");

  entry = entry_at(probe + 0);
  entry->fill(method, bci);

  // Copy the newly cached entry to input parameter
  entry_for->resource_copy(entry);

  if (TraceOopMapGeneration) {
    ResourceMark rm;
    tty->print("Done with ");
    method->print_value(); tty->cr();
  }
  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");

  return;
}

void OopMapCache::compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry) {
  // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
  OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1);
  tmp->initialize();
  tmp->fill(method, bci);
  entry->resource_copy(tmp);
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp);
}