#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)oopMapCache.cpp      1.86 08/11/24 12:22:25 JVM"
#endif
/*
 * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_oopMapCache.cpp.incl"
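
// OopMapCacheEntry is the flavor of InterpreterOopMap stored in the cache's
// C-heap-allocated array: a large bit mask is allocated on the C heap (see
// allocate_bit_mask below) rather than in a resource area, which is why the
// ASSERT-only _resource_allocate_bit_mask flag is cleared in the constructor.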
class OopMapCacheEntry: private InterpreterOopMap {
  friend class InterpreterOopMap;
  friend class OopMapForCacheEntry;
  friend class OopMapCache;
  friend class VerifyClosure;

 protected:
  // Initialization
  void fill(methodHandle method, int bci);
  // fills the bit mask for native calls
  void fill_for_native(methodHandle method);
  void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);

  // Deallocate bit masks and initialize fields
  void flush();

 private:
  void allocate_bit_mask();   // allocates the bit mask on the C heap if necessary
  void deallocate_bit_mask(); // deallocates the bit mask from the C heap if necessary
  bool verify_mask(CellTypeState *vars, CellTypeState *stack, int max_locals, int stack_top);

 public:
  OopMapCacheEntry() : InterpreterOopMap() {
#ifdef ASSERT
     _resource_allocate_bit_mask = false;
#endif
  }
};


// Implementation of OopMapForCacheEntry
// (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci)

class OopMapForCacheEntry: public GenerateOopMap {
  OopMapCacheEntry *_entry;
  int               _bci;
  int               _stack_top;

  virtual bool report_results() const     { return false; }
  virtual bool possible_gc_point          (BytecodeStream *bcs);
  virtual void fill_stackmap_prolog       (int nof_gc_points);
  virtual void fill_stackmap_epilog       ();
  virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
                                           CellTypeState* vars,
                                           CellTypeState* stack,
                                           int stack_top);
  virtual void fill_init_vars             (GrowableArray<intptr_t> *init_vars);

 public:
  OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry *entry);

  // Computes stack map for (method,bci) and initializes entry
  void compute_map(TRAPS);
  int  size();
};


OopMapForCacheEntry::OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) {
  _bci       = bci;
  _entry     = entry;
  _stack_top = -1;
}


void OopMapForCacheEntry::compute_map(TRAPS) {
  assert(!method()->is_native(), "cannot compute oop map for native methods");
  // First check if it is a method where the stackmap is always empty
  if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) {
    _entry->set_mask_size(0);
  } else {
    ResourceMark rm;
    GenerateOopMap::compute_map(CATCH);
    result_for_basicblock(_bci);
  }
}


bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) {
  return false; // We are not reporting any result. We call result_for_basicblock directly
}


void OopMapForCacheEntry::fill_stackmap_prolog(int nof_gc_points) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_epilog() {
  // Do nothing
}


void OopMapForCacheEntry::fill_init_vars(GrowableArray<intptr_t> *init_vars) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs,
                                                    CellTypeState* vars,
                                                    CellTypeState* stack,
                                                    int stack_top) {
  // Only interested in one specific bci
  if (bcs->bci() == _bci) {
    _entry->set_mask(vars, stack, stack_top);
    _stack_top = stack_top;
  }
}


int OopMapForCacheEntry::size() {
  assert(_stack_top != -1, "compute_map must be called first");
  return ((method()->is_static()) ? 0 : 1) + method()->max_locals() + _stack_top;
}


// Implementation of InterpreterOopMap and OopMapCacheEntry

class VerifyClosure : public OffsetClosure {
 private:
  OopMapCacheEntry* _entry;
  bool              _failed;

 public:
  VerifyClosure(OopMapCacheEntry* entry)         { _entry = entry; _failed = false; }
  void offset_do(int offset)                     { if (!_entry->is_oop(offset)) _failed = true; }
  bool failed() const                            { return _failed; }
};

InterpreterOopMap::InterpreterOopMap() {
  initialize();
#ifdef ASSERT
  _resource_allocate_bit_mask = true;
#endif
}

InterpreterOopMap::~InterpreterOopMap() {
  // The expectation is that the bit mask was allocated
  // last in this resource area.  That would make the free of the
  // bit_mask effective (see how FREE_RESOURCE_ARRAY does a free).
  // If it was not allocated last, there is not a correctness problem
  // but the space for the bit_mask is not freed.
  assert(_resource_allocate_bit_mask, "Trying to free C heap space");
  if (mask_size() > small_mask_limit) {
    FREE_RESOURCE_ARRAY(uintptr_t, _bit_mask[0], mask_word_size());
  }
}

bool InterpreterOopMap::is_empty() {
  bool result = _method == NULL;
  assert(_method != NULL || (_bci == 0 &&
    (_mask_size == 0 || _mask_size == USHRT_MAX) &&
    _bit_mask[0] == 0), "Should be completely empty");
  return result;
}

void InterpreterOopMap::initialize() {
  _method    = NULL;
  _mask_size = USHRT_MAX;  // This value should cause a failure quickly
  _bci       = 0;
  _expression_stack_size = 0;
  for (int i = 0; i < N; i++) _bit_mask[i] = 0;
}


void InterpreterOopMap::oop_iterate(OopClosure *blk) {
  if (method() != NULL) {
    blk->do_oop((oop*) &_method);
  }
}

void InterpreterOopMap::oop_iterate(OopClosure *blk, MemRegion mr) {
  if (method() != NULL && mr.contains(&_method)) {
    blk->do_oop((oop*) &_method);
  }
}
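
// The bit mask encodes bits_per_entry bits per local/expression-stack slot.
// Within an entry, the bit at oop_bit_number marks the slot as holding an oop
// (and, under ENABLE_ZAP_DEAD_LOCALS, the bit at dead_bit_number marks it as
// dead). Entries are packed into uintptr_t words; the loop below shifts a
// per-entry mask and reloads the next word whenever the mask shifts out to zero.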
void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for oop
    if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i);
  }
}

void InterpreterOopMap::verify() {
  // If we are doing mark sweep _method may not have a valid header
  // $$$ This used to happen only for m/s collections; we might want to
  // think of an appropriate generalization of this distinction.
  guarantee(Universe::heap()->is_gc_active() ||
            _method->is_oop_or_null(), "invalid oop in oopMapCache");
}

#ifdef ENABLE_ZAP_DEAD_LOCALS

void InterpreterOopMap::iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure) {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for dead values & oops, and for live values
         if ((value & (mask << dead_bit_number)) != 0)  dead_closure->offset_do(i); // call this for all dead values or oops
    else if ((value & (mask <<  oop_bit_number)) != 0)   oop_closure->offset_do(i); // call this for all live oops
    else                                               value_closure->offset_do(i); // call this for all live values
  }
}

#endif


void InterpreterOopMap::print() {
  int n = number_of_entries();
  tty->print("oop map for ");
  method()->print_value();
  tty->print(" @ %d = [%d] { ", bci(), n);
  for (int i = 0; i < n; i++) {
#ifdef ENABLE_ZAP_DEAD_LOCALS
    if (is_dead(i)) tty->print("%d+ ", i);
    else
#endif
    if (is_oop(i)) tty->print("%d ", i);
  }
  tty->print_cr("}");
}
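
// MaskFillerForNative computes the parameter oop mask for a native method by
// iterating over its signature: only object parameters set an oop bit;
// primitive parameters are skipped.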
class MaskFillerForNative: public NativeSignatureIterator {
 private:
  uintptr_t * _mask;                             // the bit mask to be filled
  int         _size;                             // the mask size in bits

  void set_one(int i) {
    i *= InterpreterOopMap::bits_per_entry;
    assert(0 <= i && i < _size, "offset out of bounds");
    _mask[i / BitsPerWord] |= (((uintptr_t) 1 << InterpreterOopMap::oop_bit_number) << (i % BitsPerWord));
  }

 public:
  void pass_int()                                { /* ignore */ }
  void pass_long()                               { /* ignore */ }
#ifdef _LP64
  void pass_float()                              { /* ignore */ }
#endif
  void pass_double()                             { /* ignore */ }
  void pass_object()                             { set_one(offset()); }

  MaskFillerForNative(methodHandle method, uintptr_t* mask, int size) : NativeSignatureIterator(method) {
    _mask   = mask;
    _size   = size;
    // initialize with 0
    int i = (size + BitsPerWord - 1) / BitsPerWord;
    while (i-- > 0) _mask[i] = 0;
  }

  void generate() {
    NativeSignatureIterator::iterate();
  }
};

bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
  // Check mask includes map
  VerifyClosure blk(this);
  iterate_oop(&blk);
  if (blk.failed()) return false;

  // Check if map is generated correctly
  // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
  if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals);

  for (int i = 0; i < max_locals; i++) {
    bool v1 = is_oop(i)               ? true : false;
    bool v2 = vars[i].is_reference()  ? true : false;
    assert(v1 == v2, "locals oop mask generation error");
    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
#ifdef ENABLE_ZAP_DEAD_LOCALS
    bool v3 = is_dead(i)              ? true : false;
    bool v4 = !vars[i].is_live()      ? true : false;
    assert(v3 == v4, "locals live mask generation error");
    assert(!(v1 && v3), "dead value marked as oop");
#endif
  }

  if (TraceOopMapGeneration && Verbose) { tty->cr(); tty->print("Stack (%d): ", stack_top); }
  for (int j = 0; j < stack_top; j++) {
    bool v1 = is_oop(max_locals + j)  ? true : false;
    bool v2 = stack[j].is_reference() ? true : false;
    assert(v1 == v2, "stack oop mask generation error");
    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
#ifdef ENABLE_ZAP_DEAD_LOCALS
    bool v3 = is_dead(max_locals + j) ? true : false;
    bool v4 = !stack[j].is_live()     ? true : false;
    assert(v3 == v4, "stack live mask generation error");
    assert(!(v1 && v3), "dead value marked as oop");
#endif
  }
  if (TraceOopMapGeneration && Verbose) tty->cr();
  return true;
}

void OopMapCacheEntry::allocate_bit_mask() {
  if (mask_size() > small_mask_limit) {
    assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
    _bit_mask[0] = (intptr_t)
      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size());
  }
}

void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "This bit mask should not be in the resource area");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
    debug_only(_bit_mask[0] = 0;)
  }
}


void OopMapCacheEntry::fill_for_native(methodHandle mh) {
  assert(mh->is_native(), "method must be native method");
  set_mask_size(mh->size_of_parameters() * bits_per_entry);
  allocate_bit_mask();
  // fill mask for parameters
  MaskFillerForNative mf(mh, bit_mask(), mask_size());
  mf.generate();
}


void OopMapCacheEntry::fill(methodHandle method, int bci) {
  HandleMark hm;
  // Flush entry to deallocate an existing entry
  flush();
  set_method(method());
  set_bci(bci);
  if (method->is_native()) {
    // Native method activations have oops only among the parameters and one
    // extra oop following the parameters (the mirror for static native methods).
    fill_for_native(method);
  } else {
    EXCEPTION_MARK;
    OopMapForCacheEntry gen(method, bci, this);
    gen.compute_map(CATCH);
  }
  #ifdef ASSERT
    verify();
  #endif
}
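
// set_mask packs the CellTypeState arrays produced by the abstract
// interpreter into this entry's bit mask: the first max_locals entries are
// taken from vars, the following stack_top entries from stack, streaming the
// packed bits out one uintptr_t word at a time.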
void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int stack_top) {
  // compute bit mask size
  int max_locals = method()->max_locals();
  int n_entries = max_locals + stack_top;
  set_mask_size(n_entries * bits_per_entry);
  allocate_bit_mask();
  set_expression_stack_size(stack_top);

  // compute bits
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 1;

  CellTypeState* cell = vars;
  for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) {
    // store last word
    if (mask == 0) {
      bit_mask()[word_index++] = value;
      value = 0;
      mask = 1;
    }

    // switch to stack when done with locals
    if (entry_index == max_locals) {
      cell = stack;
    }

    // set oop bit
    if (cell->is_reference()) {
      value |= (mask << oop_bit_number);
    }

  #ifdef ENABLE_ZAP_DEAD_LOCALS
    // set dead bit
    if (!cell->is_live()) {
      value |= (mask << dead_bit_number);
      assert(!cell->is_reference(), "dead value marked as oop");
    }
  #endif
  }

  // make sure last word is stored
  bit_mask()[word_index] = value;

  // verify bit mask
  assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");
}

void OopMapCacheEntry::flush() {
  deallocate_bit_mask();
  initialize();
}


// Implementation of OopMapCache

#ifndef PRODUCT

static long _total_memory_usage = 0;

long OopMapCache::memory_usage() {
  return _total_memory_usage;
}

#endif

void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
  assert(_resource_allocate_bit_mask,
    "Should not resource allocate the _bit_mask");
  assert(from->method()->is_oop(), "MethodOop is bad");

  set_method(from->method());
  set_bci(from->bci());
  set_mask_size(from->mask_size());
  set_expression_stack_size(from->expression_stack_size());

  // Is the bit mask contained in the entry?
  if (from->mask_size() <= small_mask_limit) {
    memcpy((void *)_bit_mask, (void *)from->_bit_mask,
      mask_word_size() * BytesPerWord);
  } else {
    // The expectation is that this InterpreterOopMap was recently created
    // and is empty. It is used to get a copy of a cached entry.
    // If the bit mask has a value, it should be in the
    // resource area.
    assert(_bit_mask[0] == 0 ||
      Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "The bit mask should have been allocated from a resource area");
    // Allocate the bit_mask from a Resource area for performance.  Allocating
    // from the C heap as is done for OopMapCache has a significant
    // performance impact.
    _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size());
    assert(_bit_mask[0] != 0, "bit mask was not allocated");
    memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0],
      mask_word_size() * BytesPerWord);
  }
}

inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) {
  // We use method->code_size() rather than method->identity_hash() below since
  // the mark may not be present if a pointer to the method is already reversed.
  return   ((unsigned int) bci)
         ^ ((unsigned int) method->max_locals()         << 2)
         ^ ((unsigned int) method->code_size()          << 4)
         ^ ((unsigned int) method->size_of_parameters() << 6);
}
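
// A worked example of the hash above (illustrative values): for bci = 7,
// max_locals = 3, code_size = 20 and size_of_parameters = 2 it yields
//   7 ^ (3 << 2) ^ (20 << 4) ^ (2 << 6) = 7 ^ 12 ^ 320 ^ 128 = 459,
// which entry_at() reduces modulo _size into a table index.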


OopMapCache::OopMapCache() :
  _mut(Mutex::leaf, "An OopMapCache lock", true)
{
  _array  = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size);
  // Cannot call flush for initialization, since flush
  // will check if memory should be deallocated
  for (int i = 0; i < _size; i++) _array[i].initialize();
  NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
}


OopMapCache::~OopMapCache() {
  assert(_array != NULL, "sanity check");
  // Deallocate oop maps that are allocated out-of-line
  flush();
  // Deallocate array
  NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);
}

OopMapCacheEntry* OopMapCache::entry_at(int i) const {
  return &_array[i % _size];
}

void OopMapCache::flush() {
  for (int i = 0; i < _size; i++) _array[i].flush();
}

void OopMapCache::flush_obsolete_entries() {
  for (int i = 0; i < _size; i++)
    if (!_array[i].is_empty() && _array[i].method()->is_old()) {
      // Cache entry is occupied by an old redefined method and we don't want
      // to pin it down so flush the entry.
      RC_TRACE(0x08000000, ("flush: %s(%s): cached entry @%d",
        _array[i].method()->name()->as_C_string(),
        _array[i].method()->signature()->as_C_string(), i));

      _array[i].flush();
    }
}

void OopMapCache::oop_iterate(OopClosure *blk) {
  for (int i = 0; i < _size; i++) _array[i].oop_iterate(blk);
}

void OopMapCache::oop_iterate(OopClosure *blk, MemRegion mr) {
  for (int i = 0; i < _size; i++) _array[i].oop_iterate(blk, mr);
}

void OopMapCache::verify() {
  for (int i = 0; i < _size; i++) _array[i].verify();
}
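
// lookup() proceeds in three steps: (1) probe _probe_depth consecutive slots
// for an entry matching (method, bci) and copy it out on a hit; (2) on a
// miss, compute the map and install it in the first empty slot of the probe
// window; (3) if the window is full, overwrite the first slot (a crude
// eviction; a commented-out LRU sketch is kept in the body below). Methods
// that should not be cached bypass the table via compute_one_oop_map.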
void OopMapCache::lookup(methodHandle method,
                         int bci,
                         InterpreterOopMap* entry_for) {
  MutexLocker x(&_mut);

  OopMapCacheEntry* entry = NULL;
  int probe = hash_value_for(method, bci);

  // Search hashtable for match
  int i;
  for (i = 0; i < _probe_depth; i++) {
    entry = entry_at(probe + i);
    if (entry->match(method, bci)) {
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      return;
    }
  }

  if (TraceOopMapGeneration) {
    static int count = 0;
    ResourceMark rm;
    tty->print("%d - Computing oopmap at bci %d for ", ++count, bci);
    method->print_value(); tty->cr();
  }

  // Entry is not in hashtable.
  // Compute entry and return it

  if (method->should_not_be_cached()) {
    // It is either not safe or not a good idea to cache this methodOop
    // at this time. We give the caller of lookup() a copy of the
    // interesting info via parameter entry_for, but we don't add it to
    // the cache. See the gory details in methodOop.cpp.
    compute_one_oop_map(method, bci, entry_for);
    return;
  }

  // First search for an empty slot
  for (i = 0; i < _probe_depth; i++) {
    entry  = entry_at(probe + i);
    if (entry->is_empty()) {
      entry->fill(method, bci);
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      return;
    }
  }

  if (TraceOopMapGeneration) {
    ResourceMark rm;
    tty->print_cr("*** collision in oopmap cache - flushing item ***");
  }

  // No empty slot (uncommon case). Use (some approximation of an) LRU algorithm
  //entry_at(probe + _probe_depth - 1)->flush();
  //for (i = _probe_depth - 1; i > 0; i--) {
  //  // Copying entry[i] = entry[i-1];
  //  OopMapCacheEntry *to   = entry_at(probe + i);
  //  OopMapCacheEntry *from = entry_at(probe + i - 1);
  //  to->copy(from);
  // }

  assert(method->is_method(), "should be a methodOop");

  entry = entry_at(probe + 0);
  entry->fill(method, bci);

  // Copy the newly cached entry to input parameter
  entry_for->resource_copy(entry);

  if (TraceOopMapGeneration) {
    ResourceMark rm;
    tty->print("Done with ");
    method->print_value(); tty->cr();
  }
  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");

  return;
}
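
// Usage sketch (hypothetical caller, not part of this file): a stack walker
// that needs the oops of an interpreter frame would use lookup() roughly as
//
//   ResourceMark rm;
//   InterpreterOopMap mask;                // bit mask copied into resource area
//   cache->lookup(method, bci, &mask);     // cache owned by the method's holder
//   mask.iterate_oop(&closure);            // visit each oop offset
//
// where `cache`, `method`, `bci` and `closure` are supplied by the caller.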

void OopMapCache::compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry) {
  // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
  OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1);
  tmp->initialize();
  tmp->fill(method, bci);
  entry->resource_copy(tmp);
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp);
}