/*
 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

OopStorage* JNIHandles::global_handles() {
  return _global_handles;
}

OopStorage* JNIHandles::weak_global_handles() {
  return _weak_global_handles;
}

// Serviceability agent support.
OopStorage* JNIHandles::_global_handles = NULL;
OopStorage* JNIHandles::_weak_global_handles = NULL;

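// Create the strong and weak OopStorages used for JNI global handles and
// register them with OopStorageSet, so the GC visits them as strong and
// weak roots respectively.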
void jni_handles_init() {
  JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global");
  JNIHandles::_weak_global_handles = OopStorageSet::create_weak("JNI Weak");
}

jobject JNIHandles::make_local(oop obj) {
  return make_local(Thread::current(), obj);
}


jobject JNIHandles::make_local(Thread* thread, oop obj) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    assert(oopDesc::is_oop(obj), "not an oop");
    assert(thread->is_Java_thread(), "not a Java thread");
    assert(!current_thread_in_native(), "must not be in native");
    return thread->active_handles()->allocate_handle(obj);
  }
}

static void report_handle_allocation_failure(AllocFailType alloc_failmode,
                                             const char* handle_kind) {
  if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    // Fake size value, since we don't know the min allocation size here.
    vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
                          "Cannot create %s JNI handle", handle_kind);
  } else {
    assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
  }
}

jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  assert(!current_thread_in_native(), "must not be in native");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = global_handles()->allocate();
    // Return NULL on allocation failure.
    if (ptr != NULL) {
      assert(*ptr == NULL, "invariant");
      NativeAccess<>::oop_store(ptr, obj());
      res = reinterpret_cast<jobject>(ptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "global");
    }
  }

  return res;
}


jobject JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  assert(!current_thread_in_native(), "must not be in native");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    assert(oopDesc::is_oop(obj()), "not an oop");
    oop* ptr = weak_global_handles()->allocate();
    // Return NULL on allocation failure.
    if (ptr != NULL) {
      assert(*ptr == NULL, "invariant");
      NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
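      // Tag the low bit of the returned pointer (weak_tag_value) so that
      // resolve and destroy can tell weak handles apart from strong ones.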
      char* tptr = reinterpret_cast<char*>(ptr) + weak_tag_value;
      res = reinterpret_cast<jobject>(tptr);
    } else {
      report_handle_allocation_failure(alloc_failmode, "weak global");
    }
  }
  return res;
}

// Resolve some erroneous cases to NULL, rather than treating them as
// possibly unchecked errors.  In particular, deleted handles are
// treated as NULL (though a deleted and later reallocated handle
// isn't detected).
oop JNIHandles::resolve_external_guard(jobject handle) {
  oop result = NULL;
  if (handle != NULL) {
    result = resolve_impl<DECORATORS_NONE, true /* external_guard */>(handle);
  }
  return result;
}

bool JNIHandles::is_global_weak_cleared(jweak handle) {
  assert(handle != NULL, "precondition");
  assert(is_jweak(handle), "not a weak handle");
  oop* oop_ptr = jweak_ptr(handle);
  oop value = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr);
  return value == NULL;
}

void JNIHandles::destroy_global(jobject handle) {
  if (handle != NULL) {
    assert(!is_jweak(handle), "wrong method for destroying jweak");
    oop* oop_ptr = jobject_ptr(handle);
    NativeAccess<>::oop_store(oop_ptr, (oop)NULL);
    global_handles()->release(oop_ptr);
  }
}


void JNIHandles::destroy_weak_global(jobject handle) {
  if (handle != NULL) {
    assert(is_jweak(handle), "JNI handle not jweak");
    oop* oop_ptr = jweak_ptr(handle);
    NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)NULL);
    weak_global_handles()->release(oop_ptr);
  }
}


void JNIHandles::oops_do(OopClosure* f) {
  global_handles()->oops_do(f);
}


void JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
  weak_global_handles()->weak_oops_do(is_alive, f);
}


void JNIHandles::weak_oops_do(OopClosure* f) {
  weak_global_handles()->weak_oops_do(f);
}

bool JNIHandles::is_global_storage(const OopStorage* storage) {
  return _global_handles == storage;
}

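// A pointer is a live handle in the given storage only if its entry status
// is ALLOCATED_ENTRY; UNALLOCATED_ENTRY denotes a released (dangling)
// handle, and INVALID_ENTRY means the pointer is not in the storage at all.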
inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) {
  return storage->allocation_status(ptr) == OopStorage::ALLOCATED_ENTRY;
}


jobjectRefType JNIHandles::handle_type(Thread* thread, jobject handle) {
  assert(handle != NULL, "precondition");
  jobjectRefType result = JNIInvalidRefType;
  if (is_jweak(handle)) {
    if (is_storage_handle(weak_global_handles(), jweak_ptr(handle))) {
      result = JNIWeakGlobalRefType;
    }
  } else {
    switch (global_handles()->allocation_status(jobject_ptr(handle))) {
    case OopStorage::ALLOCATED_ENTRY:
      result = JNIGlobalRefType;
      break;

    case OopStorage::UNALLOCATED_ENTRY:
      break;                    // Invalid global handle

    case OopStorage::INVALID_ENTRY:
      // Not in global storage.  Might be a local handle.
      if (is_local_handle(thread, handle) ||
          (thread->is_Java_thread() &&
           is_frame_handle((JavaThread*)thread, handle))) {
        result = JNILocalRefType;
      }
      break;

    default:
      ShouldNotReachHere();
    }
  }
  return result;
}


bool JNIHandles::is_local_handle(Thread* thread, jobject handle) {
  assert(handle != NULL, "precondition");
  JNIHandleBlock* block = thread->active_handles();

  // Look back past possible native calls to jni_PushLocalFrame.
  while (block != NULL) {
    if (block->chain_contains(handle)) {
      return true;
    }
    block = block->pop_frame_link();
  }
  return false;
}

// Determine if the handle is somewhere in the current thread's stack.
// We can't easily isolate any particular stack frame the handle might
// come from, so we'll check the whole stack.

bool JNIHandles::is_frame_handle(JavaThread* thr, jobject handle) {
  assert(handle != NULL, "precondition");
  // If there is no Java frame, then this must be top-level code, such
  // as the java command executable, in which case this type of handle
  // is not permitted.
  return (thr->has_last_Java_frame() &&
          thr->is_in_stack_range_incl((address)handle, (address)thr->last_Java_sp()));
}


bool JNIHandles::is_global_handle(jobject handle) {
  assert(handle != NULL, "precondition");
  return !is_jweak(handle) && is_storage_handle(global_handles(), jobject_ptr(handle));
}


bool JNIHandles::is_weak_global_handle(jobject handle) {
  assert(handle != NULL, "precondition");
  return is_jweak(handle) && is_storage_handle(weak_global_handles(), jweak_ptr(handle));
}

size_t JNIHandles::global_handle_memory_usage() {
  return global_handles()->total_memory_usage();
}

size_t JNIHandles::weak_global_handle_memory_usage() {
  return weak_global_handles()->total_memory_usage();
}


// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  st->print_cr("JNI global refs: " SIZE_FORMAT ", weak refs: " SIZE_FORMAT,
               global_handles()->allocation_count(),
               weak_global_handles()->allocation_count());
  st->cr();
  st->flush();
}

void JNIHandles::print() { print_on(tty); }

class VerifyJNIHandles: public OopClosure {
public:
  virtual void do_oop(oop* root) {
    guarantee(oopDesc::is_oop_or_null(RawAccess<>::oop_load(root)), "Invalid oop");
  }
  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};

void JNIHandles::verify() {
  VerifyJNIHandles verify_handle;

  oops_do(&verify_handle);
  weak_oops_do(&verify_handle);
}

// This method is implemented here to avoid circular includes between
// jniHandles.hpp and thread.hpp.
bool JNIHandles::current_thread_in_native() {
  Thread* thread = Thread::current();
  return (thread->is_Java_thread() &&
          JavaThread::current()->thread_state() == _thread_in_native);
}


int             JNIHandleBlock::_blocks_allocated     = 0;
JNIHandleBlock* JNIHandleBlock::_block_free_list      = NULL;
#ifndef PRODUCT
JNIHandleBlock* JNIHandleBlock::_block_list           = NULL;
#endif

static inline bool is_tagged_free_list(uintptr_t value) {
  return (value & 1u) != 0;
}

static inline uintptr_t tag_free_list(uintptr_t value) {
  return value | 1u;
}

static inline uintptr_t untag_free_list(uintptr_t value) {
  return value & ~(uintptr_t)1u;
}

// There is a freelist of handles running through the JNIHandleBlock
// with a tagged next pointer, distinguishing these next pointers from
// oops. The freelist handling currently relies on the size of oops
// being the same as a native pointer. If this ever changes, then
// this freelist handling must change too.
STATIC_ASSERT(sizeof(oop) == sizeof(uintptr_t));
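// Tagging with the low bit is safe because heap objects are at least
// pointer-aligned, so a valid oop stored in a handle never has its low bit
// set, while a freelist link installed via tag_free_list() always does.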

#ifdef ASSERT
void JNIHandleBlock::zap() {
  // Zap block values
  _top = 0;
  for (int index = 0; index < block_size_in_oops; index++) {
    // NOT using Access here; just bare clobbering to NULL, since the
    // block no longer contains valid oops.
    _handles[index] = 0;
  }
}
#endif // ASSERT

JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != NULL && thread->free_handle_block() != NULL) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  } else {
    // Locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLocker ml(JNIHandleBlockFreeList_lock,
                   Mutex::_no_safepoint_check_flag);
    if (_block_free_list == NULL) {
      // Allocate new block
      block = new JNIHandleBlock();
      _blocks_allocated++;
      block->zap();
      #ifndef PRODUCT
      // Link new block to list of all allocated blocks
      block->_block_list_link = _block_list;
      _block_list = block;
      #endif
    } else {
      // Get block from free list
      block = _block_free_list;
      _block_free_list = _block_free_list->_next;
    }
  }
  block->_top = 0;
  block->_next = NULL;
  block->_pop_frame_link = NULL;
  block->_planned_capacity = block_size_in_oops;
  // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
  debug_only(block->_last = NULL);
  debug_only(block->_free_list = NULL);
  debug_only(block->_allocate_before_rebuild = -1);
  return block;
}


void JNIHandleBlock::release_block(JNIHandleBlock* block, Thread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put returned block at the beginning of the thread-local free list.
  // Note that if thread == NULL, we use it as an implicit argument that
  // we _don't_ want the block to be kept on the free_handle_block.
  // See for instance JavaThread::exit().
  if (thread != NULL) {
    block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = NULL;
    thread->set_free_handle_block(block);

    // Add original freelist to end of chain
    if (freelist != NULL) {
      while (block->_next != NULL) block = block->_next;
      block->_next = freelist;
    }
    block = NULL;
  }
  if (block != NULL) {
    // Return blocks to free list
    // Locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLocker ml(JNIHandleBlockFreeList_lock,
                   Mutex::_no_safepoint_check_flag);
    while (block != NULL) {
      block->zap();
      JNIHandleBlock* next = block->_next;
      block->_next = _block_free_list;
      _block_free_list = block;
      block = next;
    }
  }
  if (pop_frame_link != NULL) {
    // As a sanity check we release blocks pointed to by the pop_frame_link.
    // This should never happen (only if PopLocalFrame is not called the
    // correct number of times).
    release_block(pop_frame_link, thread);
  }
}


void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != NULL) {
    for (JNIHandleBlock* current = current_chain; current != NULL;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == NULL,
             "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        uintptr_t* addr = &(current->_handles)[index];
        uintptr_t value = *addr;
        // Traverse heap pointers only, not deleted handles or free list
        // pointers.
        if (value != 0 && !is_tagged_free_list(value)) {
          oop* root = (oop*)addr;
          f->do_oop(root);
        }
      }
      // The next handle block is valid only if the current block is full.
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}


jobject JNIHandleBlock::allocate_handle(oop obj) {
  assert(Universe::heap()->is_in(obj), "sanity check");
  if (_top == 0) {
    // This is the first allocation or the initial block got zapped when
    // entering a native function. If we have any following blocks they are
    // not valid anymore.
    for (JNIHandleBlock* current = _next; current != NULL;
         current = current->_next) {
      assert(current->_last == NULL, "only first block should have _last set");
      assert(current->_free_list == NULL,
             "only first block should have _free_list set");
      if (current->_top == 0) {
        // All blocks after the first clear trailing block are already cleared.
#ifdef ASSERT
        for (current = current->_next; current != NULL; current = current->_next) {
          assert(current->_top == 0, "trailing blocks must already be cleared");
        }
#endif
        break;
      }
      current->_top = 0;
      current->zap();
    }
    // Clear initial block
    _free_list = NULL;
    _allocate_before_rebuild = 0;
    _last = this;
    zap();
  }

  // Try last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = (oop*)&(_last->_handles)[_last->_top++];
    NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
    return (jobject) handle;
  }

  // Try free list
  if (_free_list != NULL) {
    oop* handle = (oop*)_free_list;
    _free_list = (uintptr_t*) untag_free_list(*_free_list);
    NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
    return (jobject) handle;
  }
  // Check if an unused block follows the last block
  if (_last->_next != NULL) {
    // Update _last and retry
    _last = _last->_next;
    return allocate_handle(obj);
  }

  // No space available, we have to rebuild free list or expand
  if (_allocate_before_rebuild == 0) {
    rebuild_free_list();        // updates _allocate_before_rebuild counter
  } else {
    // Append new block
    Thread* thread = Thread::current();
    Handle obj_handle(thread, obj);
    // This can block, so we need to preserve obj across call.
    _last->_next = JNIHandleBlock::allocate_block(thread);
    _last = _last->_next;
    _allocate_before_rebuild--;
    obj = obj_handle();
  }
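  // Retry: each pass either rebuilds the free list or appends a fresh block
  // (with _allocate_before_rebuild set accordingly), so the recursion
  // terminates once a slot is available.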
  return allocate_handle(obj);  // retry
}

void JNIHandleBlock::rebuild_free_list() {
  assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
  int free = 0;
  int blocks = 0;
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      uintptr_t* handle = &(current->_handles)[index];
      if (*handle == 0) {
        // This handle was cleared out by a delete call; reuse it.
        *handle = _free_list == NULL ? 0 : tag_free_list((uintptr_t)_free_list);
        _free_list = handle;
        free++;
      }
    }
    // We should not rebuild the free list if there are unused handles at the end.
    assert(current->_top == block_size_in_oops, "just checking");
    blocks++;
  }
  // Heuristic: if more than half of the handles are free we rebuild next time
  // as well; otherwise we append a corresponding number of new blocks before
  // attempting a free list rebuild again.
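  // Example (assuming block_size_in_oops == 32): with 4 full blocks,
  // total = 128; if only free = 40 slots were reclaimed, extra = 48 and
  // ceil(48 / 32) = 2 new blocks are appended before the next rebuild.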
  int total = blocks * block_size_in_oops;
  int extra = total - 2*free;
  if (extra > 0) {
    // Not as many free handles as we would like - compute number of new blocks to append
    _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
  }
}


bool JNIHandleBlock::contains(jobject handle) const {
  return ((jobject)&_handles[0] <= handle && handle < (jobject)&_handles[_top]);
}


bool JNIHandleBlock::chain_contains(jobject handle) const {
  for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != NULL; current = current->_next) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}


size_t JNIHandleBlock::length() const {
  size_t result = 1;
  for (JNIHandleBlock* current = _next; current != NULL; current = current->_next) {
    result++;
  }
  return result;
}

class CountJNIHandleClosure: public OopClosure {
private:
  int _count;
public:
  CountJNIHandleClosure(): _count(0) {}
  virtual void do_oop(oop* ooph) { _count++; }
  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
  int count() { return _count; }
};

const size_t JNIHandleBlock::get_number_of_live_handles() {
  CountJNIHandleClosure counter;
  oops_do(&counter);
  return counter.count();
}

// This method is not thread-safe: it must be called while holding a lock
// on the structure.
size_t JNIHandleBlock::memory_usage() const {
  return length() * sizeof(JNIHandleBlock);
}


#ifndef PRODUCT

bool JNIHandles::is_local_handle(jobject handle) {
  return JNIHandleBlock::any_contains(handle);
}

bool JNIHandleBlock::any_contains(jobject handle) {
  assert(handle != NULL, "precondition");
  for (JNIHandleBlock* current = _block_list; current != NULL; current = current->_block_list_link) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}

void JNIHandleBlock::print_statistics() {
  int used_blocks = 0;
  int free_blocks = 0;
  int used_handles = 0;
  int free_handles = 0;
  JNIHandleBlock* block = _block_list;
  while (block != NULL) {
    if (block->_top > 0) {
      used_blocks++;
    } else {
      free_blocks++;
    }
    used_handles += block->_top;
    free_handles += (block_size_in_oops - block->_top);
    block = block->_block_list_link;
  }
  tty->print_cr("JNIHandleBlocks statistics");
  tty->print_cr("- blocks allocated: %d", used_blocks + free_blocks);
  tty->print_cr("- blocks in use:    %d", used_blocks);
  tty->print_cr("- blocks free:      %d", free_blocks);
  tty->print_cr("- handles in use:   %d", used_handles);
  tty->print_cr("- handles free:     %d", free_handles);
}

#endif