/*
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"

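// JNIHandles keeps three VM-global roots: _global_handles holds strong global
// JNI references (scanned as GC roots via oops_do), _weak_global_handles holds
// weak global references (scanned via weak_oops_do), and _deleted_handle is a
// dedicated java.lang.Object instance used as a sentinel: destroyed handles
// are overwritten with it so their slots can later be reclaimed by
// JNIHandleBlock::rebuild_free_list.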
JNIHandleBlock* JNIHandles::_global_handles       = NULL;
JNIHandleBlock* JNIHandles::_weak_global_handles  = NULL;
oop             JNIHandles::_deleted_handle       = NULL;


jobject JNIHandles::make_local(oop obj) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    Thread* thread = Thread::current();
    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
    return thread->active_handles()->allocate_handle(obj);
  }
}


// optimized versions

jobject JNIHandles::make_local(Thread* thread, oop obj) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
    return thread->active_handles()->allocate_handle(obj);
  }
}


jobject JNIHandles::make_local(JNIEnv* env, oop obj) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    JavaThread* thread = JavaThread::thread_from_jni_environment(env);
    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
    return thread->active_handles()->allocate_handle(obj);
  }
}


jobject JNIHandles::make_global(Handle obj) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    MutexLocker ml(JNIGlobalHandle_lock);
    assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
    res = _global_handles->allocate_handle(obj());
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }

  return res;
}


jobject JNIHandles::make_weak_global(Handle obj) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    MutexLocker ml(JNIGlobalHandle_lock);
    assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
    res = _weak_global_handles->allocate_handle(obj());
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }
  return res;
}


void JNIHandles::destroy_global(jobject handle) {
  if (handle != NULL) {
    assert(is_global_handle(handle), "Invalid delete of global JNI handle");
    *((oop*)handle) = deleted_handle(); // Mark the handle as deleted, allocate will reuse it
  }
}


void JNIHandles::destroy_weak_global(jobject handle) {
  if (handle != NULL) {
    assert(!CheckJNICalls || is_weak_global_handle(handle), "Invalid delete of weak global JNI handle");
    *((oop*)handle) = deleted_handle(); // Mark the handle as deleted, allocate will reuse it
  }
}


void JNIHandles::oops_do(OopClosure* f) {
  f->do_oop(&_deleted_handle);
  _global_handles->oops_do(f);
}


void JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
  _weak_global_handles->weak_oops_do(is_alive, f);
}
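
// Note: JNIHandles::oops_do covers only the strong global handles (plus the
// _deleted_handle sentinel); weak globals are visited separately through
// weak_oops_do so the collector can apply its liveness test first. Local
// handles are not reached from here; they are covered by each thread's own
// root scanning of its active_handles() chain.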


void JNIHandles::initialize() {
  _global_handles      = JNIHandleBlock::allocate_block();
  _weak_global_handles = JNIHandleBlock::allocate_block();
  EXCEPTION_MARK;
  // We will never reach the CATCH below since Exceptions::_throw will cause
  // the VM to exit if an exception is thrown during initialization
  Klass* k      = SystemDictionary::Object_klass();
  _deleted_handle = InstanceKlass::cast(k)->allocate_instance(CATCH);
}


bool JNIHandles::is_local_handle(Thread* thread, jobject handle) {
  JNIHandleBlock* block = thread->active_handles();

  // Look back past possible native calls to jni_PushLocalFrame.
  while (block != NULL) {
    if (block->chain_contains(handle)) {
      return true;
    }
    block = block->pop_frame_link();
  }
  return false;
}

// Determine if the handle is somewhere in the current thread's stack.
// We can't easily isolate any particular stack frame the handle might
// come from, so we'll check the whole stack.

bool JNIHandles::is_frame_handle(JavaThread* thr, jobject obj) {
  // If there is no java frame, then this must be top level code, such
  // as the java command executable, in which case this type of handle
  // is not permitted.
  return (thr->has_last_Java_frame() &&
         (void*)obj < (void*)thr->stack_base() &&
         (void*)obj >= (void*)thr->last_Java_sp());
}


bool JNIHandles::is_global_handle(jobject handle) {
  return _global_handles->chain_contains(handle);
}


bool JNIHandles::is_weak_global_handle(jobject handle) {
  return _weak_global_handles->chain_contains(handle);
}

long JNIHandles::global_handle_memory_usage() {
  return _global_handles->memory_usage();
}

long JNIHandles::weak_global_handle_memory_usage() {
  return _weak_global_handles->memory_usage();
}


class AlwaysAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop obj) { return true; }
};

class CountHandleClosure: public OopClosure {
private:
  int _count;
public:
  CountHandleClosure(): _count(0) {}
  virtual void do_oop(oop* unused) {
    _count++;
  }
  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
  int count() { return _count; }
};

// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  assert(_global_handles != NULL && _weak_global_handles != NULL,
         "JNIHandles not initialized");

  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  oops_do(&global_handle_count);
  weak_oops_do(&always_alive, &global_handle_count);

  st->print_cr("JNI global references: %d", global_handle_count.count());
  st->cr();
  st->flush();
}

class VerifyHandleClosure: public OopClosure {
public:
  virtual void do_oop(oop* root) {
    (*root)->verify();
  }
  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};

void JNIHandles::verify() {
  VerifyHandleClosure verify_handle;
  AlwaysAliveClosure always_alive;

  oops_do(&verify_handle);
  weak_oops_do(&always_alive, &verify_handle);
}



void jni_handles_init() {
  JNIHandles::initialize();
}


int             JNIHandleBlock::_blocks_allocated     = 0;
JNIHandleBlock* JNIHandleBlock::_block_free_list      = NULL;
#ifndef PRODUCT
JNIHandleBlock* JNIHandleBlock::_block_list           = NULL;
#endif

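// A JNIHandleBlock is a fixed-size array of block_size_in_oops oop slots.
// _top is the index of the next unused slot, blocks are chained through
// _next, and a free list of reusable slots (built by rebuild_free_list from
// slots marked with the deleted-handle sentinel) is threaded through the
// slot contents themselves. Released blocks are cached either on a
// thread-local list (Thread::free_handle_block) or on the global
// _block_free_list guarded by JNIHandleBlockFreeList_lock.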

void JNIHandleBlock::zap() {
  // Zap block values
  _top  = 0;
  for (int index = 0; index < block_size_in_oops; index++) {
    _handles[index] = badJNIHandle;
  }
}

JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread)  {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != NULL && thread->free_handle_block() != NULL) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  }
  else {
    // locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLockerEx ml(JNIHandleBlockFreeList_lock,
                     Mutex::_no_safepoint_check_flag);
    if (_block_free_list == NULL) {
      // Allocate new block
      block = new JNIHandleBlock();
      _blocks_allocated++;
      if (TraceJNIHandleAllocation) {
        tty->print_cr("JNIHandleBlock " INTPTR_FORMAT " allocated (%d total blocks)",
                      block, _blocks_allocated);
      }
      if (ZapJNIHandleArea) block->zap();
      #ifndef PRODUCT
      // Link new block to list of all allocated blocks
      block->_block_list_link = _block_list;
      _block_list = block;
      #endif
    } else {
      // Get block from free list
      block = _block_free_list;
      _block_free_list = _block_free_list->_next;
    }
  }
  block->_top  = 0;
  block->_next = NULL;
  block->_pop_frame_link = NULL;
  // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
  debug_only(block->_last = NULL);
  debug_only(block->_free_list = NULL);
  debug_only(block->_allocate_before_rebuild = -1);
  return block;
}


void JNIHandleBlock::release_block(JNIHandleBlock* block, Thread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put the returned block at the beginning of the thread-local free list.
  // Note that a NULL thread serves as an implicit argument that the block
  // should _not_ be kept on the thread's free_handle_block list.
  // See for instance JavaThread::exit().
  if (thread != NULL ) {
    if (ZapJNIHandleArea) block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = NULL;
    thread->set_free_handle_block(block);

    // Add original freelist to end of chain
    if ( freelist != NULL ) {
      while ( block->_next != NULL ) block = block->_next;
      block->_next = freelist;
    }
    block = NULL;
  }
  if (block != NULL) {
    // Return blocks to free list
    // locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLockerEx ml(JNIHandleBlockFreeList_lock,
                     Mutex::_no_safepoint_check_flag);
    while (block != NULL) {
      if (ZapJNIHandleArea) block->zap();
      JNIHandleBlock* next = block->_next;
      block->_next = _block_free_list;
      _block_free_list = block;
      block = next;
    }
  }
  if (pop_frame_link != NULL) {
    // As a sanity check we also release any blocks reachable through the
    // pop_frame_link. This should never be needed (it only happens if
    // PopLocalFrame is not called the correct number of times).
    release_block(pop_frame_link, thread);
  }
}


void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != NULL) {
    for (JNIHandleBlock* current = current_chain; current != NULL;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == NULL,
        "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        oop* root = &(current->_handles)[index];
        oop value = *root;
        // traverse heap pointers only, not deleted handles or free list
        // pointers
        if (value != NULL && Universe::heap()->is_in_reserved(value)) {
          f->do_oop(root);
        }
      }
      // the next handle block is valid only if current block is full
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}


void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
                                  OopClosure* f) {
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    assert(current->pop_frame_link() == NULL,
      "blocks holding weak global JNI handles should not have pop frame link set");
    for (int index = 0; index < current->_top; index++) {
      oop* root = &(current->_handles)[index];
      oop value = *root;
      // traverse heap pointers only, not deleted handles or free list pointers
      if (value != NULL && Universe::heap()->is_in_reserved(value)) {
        if (is_alive->do_object_b(value)) {
          // The weakly referenced object is alive, update pointer
          f->do_oop(root);
        } else {
          // The weakly referenced object is not alive, clear the reference by storing NULL
          if (TraceReferenceGC) {
            tty->print_cr("Clearing JNI weak reference (" INTPTR_FORMAT ")", root);
          }
          *root = NULL;
        }
      }
    }
    // the next handle block is valid only if current block is full
    if (current->_top < block_size_in_oops) {
      break;
    }
  }

  /*
   * JVMTI data structures may also contain weak oops.  The iteration of them
   * is placed here so that we don't need to add it to each of the collectors.
   */
  JvmtiExport::weak_oops_do(is_alive, f);
}
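
// Note: a cleared weak handle keeps the NULL stored above, so resolving it
// yields NULL as JNI requires for a collected weak global reference; the slot
// itself is not recycled until destroy_weak_global overwrites it with the
// deleted-handle sentinel and rebuild_free_list picks it up.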


jobject JNIHandleBlock::allocate_handle(oop obj) {
  assert(Universe::heap()->is_in_reserved(obj), "sanity check");
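  // Allocation strategy, in order: bump _top in the last block of the chain,
  // then pull a slot from the rebuilt free list, then advance into an
  // already-allocated but unused trailing block, and only then either rebuild
  // the free list or append a fresh block (see _allocate_before_rebuild).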
  if (_top == 0) {
    // This is the first allocation or the initial block got zapped when
    // entering a native function. If we have any following blocks they are
    // not valid anymore.
    for (JNIHandleBlock* current = _next; current != NULL;
         current = current->_next) {
      assert(current->_last == NULL, "only first block should have _last set");
      assert(current->_free_list == NULL,
             "only first block should have _free_list set");
      current->_top = 0;
      if (ZapJNIHandleArea) current->zap();
    }
    // Clear initial block
    _free_list = NULL;
    _allocate_before_rebuild = 0;
    _last = this;
    if (ZapJNIHandleArea) zap();
  }

  // Try last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = &(_last->_handles)[_last->_top++];
    *handle = obj;
    return (jobject) handle;
  }

  // Try free list
  if (_free_list != NULL) {
    oop* handle = _free_list;
    _free_list = (oop*) *_free_list;
    *handle = obj;
    return (jobject) handle;
  }
  // Check if an unused block follows the last block
  if (_last->_next != NULL) {
    // update last and retry
    _last = _last->_next;
    return allocate_handle(obj);
  }

  // No space available, we have to rebuild free list or expand
  if (_allocate_before_rebuild == 0) {
    rebuild_free_list();        // updates _allocate_before_rebuild counter
  } else {
    // Append new block
    Thread* thread = Thread::current();
    Handle obj_handle(thread, obj);
    // This can block, so we need to preserve obj across call.
    _last->_next = JNIHandleBlock::allocate_block(thread);
    _last = _last->_next;
    _allocate_before_rebuild--;
    obj = obj_handle();
  }
  return allocate_handle(obj);  // retry
}


void JNIHandleBlock::rebuild_free_list() {
  assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
  int free = 0;
  int blocks = 0;
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      oop* handle = &(current->_handles)[index];
      if (*handle ==  JNIHandles::deleted_handle()) {
        // this handle was cleared out by a delete call, reuse it
        *handle = (oop) _free_list;
        _free_list = handle;
        free++;
      }
    }
    // we should not rebuild free list if there are unused handles at the end
    assert(current->_top == block_size_in_oops, "just checking");
    blocks++;
  }
  // Heuristic: if more than half of the handles are free we rebuild next time
  // as well, otherwise we append a corresponding number of new blocks before
  // attempting a free list rebuild again.
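  // Worked example (assuming, for illustration, block_size_in_oops == 32):
  // with blocks == 2 and free == 10, total == 64 and extra == 64 - 2*10 == 44,
  // so _allocate_before_rebuild becomes (44 + 31) / 32 == 2 and two new blocks
  // are appended before the next rebuild; with free == 40, extra <= 0 and the
  // free list is simply rebuilt again next time.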
  int total = blocks * block_size_in_oops;
  int extra = total - 2*free;
  if (extra > 0) {
    // Not as many free handles as we would like - compute number of new blocks to append
    _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
  }
  if (TraceJNIHandleAllocation) {
    tty->print_cr("Rebuild free list JNIHandleBlock " INTPTR_FORMAT " blocks=%d used=%d free=%d add=%d",
      this, blocks, total-free, free, _allocate_before_rebuild);
  }
}


bool JNIHandleBlock::contains(jobject handle) const {
  return ((jobject)&_handles[0] <= handle && handle < (jobject)&_handles[_top]);
}


bool JNIHandleBlock::chain_contains(jobject handle) const {
  for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != NULL; current = current->_next) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}


int JNIHandleBlock::length() const {
  int result = 1;
  for (JNIHandleBlock* current = _next; current != NULL; current = current->_next) {
    result++;
  }
  return result;
}

// This method is not thread-safe; it must be called while holding a lock on
// the structure.
long JNIHandleBlock::memory_usage() const {
  return length() * sizeof(JNIHandleBlock);
}


#ifndef PRODUCT

bool JNIHandleBlock::any_contains(jobject handle) {
  for (JNIHandleBlock* current = _block_list; current != NULL; current = current->_block_list_link) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}

void JNIHandleBlock::print_statistics() {
  int used_blocks = 0;
  int free_blocks = 0;
  int used_handles = 0;
  int free_handles = 0;
  JNIHandleBlock* block = _block_list;
  while (block != NULL) {
    if (block->_top > 0) {
      used_blocks++;
    } else {
      free_blocks++;
    }
    used_handles += block->_top;
    free_handles += (block_size_in_oops - block->_top);
    block = block->_block_list_link;
  }
  tty->print_cr("JNIHandleBlocks statistics");
  tty->print_cr("- blocks allocated: %d", used_blocks + free_blocks);
  tty->print_cr("- blocks in use:    %d", used_blocks);
  tty->print_cr("- blocks free:      %d", free_blocks);
  tty->print_cr("- handles in use:   %d", used_handles);
  tty->print_cr("- handles free:     %d", free_handles);
}

#endif