/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"

JNIHandleBlock* JNIHandles::_global_handles       = NULL;
JNIHandleBlock* JNIHandles::_weak_global_handles  = NULL;
oop             JNIHandles::_deleted_handle       = NULL;


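// Create a thread-local JNI handle for obj in the current thread's active
// handle block.  A NULL oop yields a NULL jobject; null handles are simply
// passed through.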
jobject JNIHandles::make_local(oop obj) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    Thread* thread = Thread::current();
    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
    return thread->active_handles()->allocate_handle(obj);
  }
}


// optimized versions

jobject JNIHandles::make_local(Thread* thread, oop obj) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
    return thread->active_handles()->allocate_handle(obj);
  }
}


jobject JNIHandles::make_local(JNIEnv* env, oop obj) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    JavaThread* thread = JavaThread::thread_from_jni_environment(env);
    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
    return thread->active_handles()->allocate_handle(obj);
  }
}


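// Create a JNI global handle for obj.  Global handles live in the shared
// _global_handles block chain, are allocated under JNIGlobalHandle_lock, and
// remain strong GC roots until destroy_global() is called.
//
// Illustrative usage from VM code (a sketch only; assumes a live oop already
// wrapped in a Handle on the current thread; 'some_oop' is hypothetical):
//
//   Handle h(Thread::current(), some_oop);
//   jobject g = JNIHandles::make_global(h);
//   ...
//   JNIHandles::destroy_global(g);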
jobject JNIHandles::make_global(Handle obj) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    MutexLocker ml(JNIGlobalHandle_lock);
    assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
    res = _global_handles->allocate_handle(obj());
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }

  return res;
}


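// Create a JNI weak global handle for obj.  Weak global handles live in the
// shared _weak_global_handles block chain; they do not keep the referent
// alive and are cleared by weak_oops_do() once the referent dies.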
jobject JNIHandles::make_weak_global(Handle obj) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    MutexLocker ml(JNIGlobalHandle_lock);
    assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
    res = _weak_global_handles->allocate_handle(obj());
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }
  return res;
}


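// Destroying a handle does not free its slot immediately.  The slot is
// overwritten with the deleted_handle() sentinel, which rebuild_free_list()
// later threads onto a free list so that allocate_handle() can reuse it.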
void JNIHandles::destroy_global(jobject handle) {
  if (handle != NULL) {
    assert(is_global_handle(handle), "Invalid delete of global JNI handle");
    *((oop*)handle) = deleted_handle(); // Mark the handle as deleted; allocate_handle will reuse it
  }
}


void JNIHandles::destroy_weak_global(jobject handle) {
  if (handle != NULL) {
    assert(!CheckJNICalls || is_weak_global_handle(handle), "Invalid delete of weak global JNI handle");
    *((oop*)handle) = deleted_handle(); // Mark the handle as deleted; allocate_handle will reuse it
  }
}


void JNIHandles::oops_do(OopClosure* f) {
  f->do_oop(&_deleted_handle);
  _global_handles->oops_do(f);
}


void JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
  _weak_global_handles->weak_oops_do(is_alive, f);
}


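// Allocate the global and weak global handle blocks and create the
// deleted-handle sentinel: a dummy java.lang.Object instance used to mark
// slots whose handles have been destroyed.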
void JNIHandles::initialize() {
  _global_handles      = JNIHandleBlock::allocate_block();
  _weak_global_handles = JNIHandleBlock::allocate_block();
  EXCEPTION_MARK;
  // We will never reach the CATCH below since Exceptions::_throw will cause
  // the VM to exit if an exception is thrown during initialization
  Klass* k      = SystemDictionary::Object_klass();
  _deleted_handle = InstanceKlass::cast(k)->allocate_instance(CATCH);
}


bool JNIHandles::is_local_handle(Thread* thread, jobject handle) {
  JNIHandleBlock* block = thread->active_handles();

  // Look back past possible native calls to jni_PushLocalFrame.
  while (block != NULL) {
    if (block->chain_contains(handle)) {
      return true;
    }
    block = block->pop_frame_link();
  }
  return false;
}


// Determine if the handle is somewhere in the current thread's stack.
// We can't easily isolate the particular stack frame the handle might
// come from, so we check the whole stack.

bool JNIHandles::is_frame_handle(JavaThread* thr, jobject obj) {
  // If there is no Java frame, then this must be top-level code, such
  // as the java command executable, in which case this type of handle
  // is not permitted.
  return (thr->has_last_Java_frame() &&
         (void*)obj < (void*)thr->stack_base() &&
         (void*)obj >= (void*)thr->last_Java_sp());
}


bool JNIHandles::is_global_handle(jobject handle) {
  return _global_handles->chain_contains(handle);
}


bool JNIHandles::is_weak_global_handle(jobject handle) {
  return _weak_global_handles->chain_contains(handle);
}

long JNIHandles::global_handle_memory_usage() {
  return _global_handles->memory_usage();
}

long JNIHandles::weak_global_handle_memory_usage() {
  return _weak_global_handles->memory_usage();
}


class AlwaysAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop obj) { return true; }
};

class CountHandleClosure: public OopClosure {
private:
  int _count;
public:
  CountHandleClosure(): _count(0) {}
  virtual void do_oop(oop* ooph) {
    if (*ooph != JNIHandles::deleted_handle()) {
      _count++;
    }
  }
  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
  int count() { return _count; }
};

// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  assert(_global_handles != NULL && _weak_global_handles != NULL,
         "JNIHandles not initialized");

  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  oops_do(&global_handle_count);
  weak_oops_do(&always_alive, &global_handle_count);

  st->print_cr("JNI global references: %d", global_handle_count.count());
  st->cr();
  st->flush();
}

class VerifyHandleClosure: public OopClosure {
public:
  virtual void do_oop(oop* root) {
    (*root)->verify();
  }
  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};

void JNIHandles::verify() {
  VerifyHandleClosure verify_handle;
  AlwaysAliveClosure always_alive;

  oops_do(&verify_handle);
  weak_oops_do(&always_alive, &verify_handle);
}



void jni_handles_init() {
  JNIHandles::initialize();
}


int             JNIHandleBlock::_blocks_allocated     = 0;
JNIHandleBlock* JNIHandleBlock::_block_free_list      = NULL;
#ifndef PRODUCT
JNIHandleBlock* JNIHandleBlock::_block_list           = NULL;
#endif


void JNIHandleBlock::zap() {
  // Zap block values
  _top  = 0;
  for (int index = 0; index < block_size_in_oops; index++) {
    _handles[index] = badJNIHandle;
  }
}

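// Allocate a handle block for the given thread.  A block is taken from the
// thread-local cache of released blocks when one is available; otherwise the
// global free list is consulted (under JNIHandleBlockFreeList_lock), and only
// if that is also empty is a fresh block allocated with new.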
JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != NULL && thread->free_handle_block() != NULL) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  } else {
    // locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLockerEx ml(JNIHandleBlockFreeList_lock,
                     Mutex::_no_safepoint_check_flag);
    if (_block_free_list == NULL) {
      // Allocate new block
      block = new JNIHandleBlock();
      _blocks_allocated++;
      if (TraceJNIHandleAllocation) {
        tty->print_cr("JNIHandleBlock " INTPTR_FORMAT " allocated (%d total blocks)",
                      p2i(block), _blocks_allocated);
      }
      if (ZapJNIHandleArea) block->zap();
      #ifndef PRODUCT
      // Link new block to list of all allocated blocks
      block->_block_list_link = _block_list;
      _block_list = block;
      #endif
    } else {
      // Get block from free list
      block = _block_free_list;
      _block_free_list = _block_free_list->_next;
    }
  }
  block->_top  = 0;
  block->_next = NULL;
  block->_pop_frame_link = NULL;
  block->_planned_capacity = block_size_in_oops;
  // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
  debug_only(block->_last = NULL);
  debug_only(block->_free_list = NULL);
  debug_only(block->_allocate_before_rebuild = -1);
  return block;
}


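// Release a chain of handle blocks.  If a thread is given, the blocks are
// cached on that thread's free_handle_block list for fast reuse; otherwise
// they are returned to the global block free list under the lock.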
void JNIHandleBlock::release_block(JNIHandleBlock* block, Thread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put returned block at the beginning of the thread-local free list.
  // Note that thread == NULL serves as an implicit argument that we do
  // _not_ want the block to be kept on the thread's free_handle_block list.
  // See for instance JavaThread::exit().
  if (thread != NULL) {
    if (ZapJNIHandleArea) block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = NULL;
    thread->set_free_handle_block(block);

    // Add original freelist to end of chain
    if (freelist != NULL) {
      while (block->_next != NULL) block = block->_next;
      block->_next = freelist;
    }
    block = NULL;
  }
  if (block != NULL) {
    // Return blocks to free list
    // locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLockerEx ml(JNIHandleBlockFreeList_lock,
                     Mutex::_no_safepoint_check_flag);
    while (block != NULL) {
      if (ZapJNIHandleArea) block->zap();
      JNIHandleBlock* next = block->_next;
      block->_next = _block_free_list;
      _block_free_list = block;
      block = next;
    }
  }
  if (pop_frame_link != NULL) {
    // As a sanity check we release any blocks reachable through the
    // pop_frame_link. This should never be needed (it only happens if
    // PopLocalFrame is not called the correct number of times).
    release_block(pop_frame_link, thread);
  }
}


void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != NULL) {
    for (JNIHandleBlock* current = current_chain; current != NULL;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == NULL,
        "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        oop* root = &(current->_handles)[index];
        oop value = *root;
        // traverse heap pointers only, not deleted handles or free list
        // pointers
        if (value != NULL && Universe::heap()->is_in_reserved(value)) {
          f->do_oop(root);
        }
      }
      // The next handle block is valid only if the current block is full.
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}


void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
                                  OopClosure* f) {
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    assert(current->pop_frame_link() == NULL,
      "blocks holding weak global JNI handles should not have pop frame link set");
    for (int index = 0; index < current->_top; index++) {
      oop* root = &(current->_handles)[index];
      oop value = *root;
      // traverse heap pointers only, not deleted handles or free list pointers
      if (value != NULL && Universe::heap()->is_in_reserved(value)) {
        if (is_alive->do_object_b(value)) {
          // The weakly referenced object is alive, update pointer
          f->do_oop(root);
        } else {
          // The weakly referenced object is not alive, clear the reference by storing NULL
          if (TraceReferenceGC) {
            tty->print_cr("Clearing JNI weak reference (" INTPTR_FORMAT ")", p2i(root));
          }
          *root = NULL;
        }
      }
    }
    // The next handle block is valid only if the current block is full.
    if (current->_top < block_size_in_oops) {
      break;
    }
  }

  /*
   * JVMTI data structures may also contain weak oops.  The iteration of them
   * is placed here so that we don't need to add it to each of the collectors.
   */
  JvmtiExport::weak_oops_do(is_alive, f);
}


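// Allocate a handle in this block chain.  Allocation tries, in order:
// the first free slot in the last block, the free list of slots reclaimed
// from destroyed handles, any unused block already linked after the last
// one, and finally either a free list rebuild or appending a new block.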
jobject JNIHandleBlock::allocate_handle(oop obj) {
  assert(Universe::heap()->is_in_reserved(obj), "sanity check");
  if (_top == 0) {
    // This is the first allocation, or the initial block got zapped when
    // entering a native function. If we have any following blocks, they are
    // no longer valid.
    for (JNIHandleBlock* current = _next; current != NULL;
         current = current->_next) {
      assert(current->_last == NULL, "only first block should have _last set");
      assert(current->_free_list == NULL,
             "only first block should have _free_list set");
      current->_top = 0;
      if (ZapJNIHandleArea) current->zap();
    }
    // Clear initial block
    _free_list = NULL;
    _allocate_before_rebuild = 0;
    _last = this;
    if (ZapJNIHandleArea) zap();
  }

  // Try last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = &(_last->_handles)[_last->_top++];
    *handle = obj;
    return (jobject) handle;
  }

  // Try free list
  if (_free_list != NULL) {
    oop* handle = _free_list;
    _free_list = (oop*) *_free_list;
    *handle = obj;
    return (jobject) handle;
  }
  // Check if an unused block follows the last block
  if (_last->_next != NULL) {
    // update last and retry
    _last = _last->_next;
    return allocate_handle(obj);
  }

  // No space available, we have to rebuild free list or expand
  if (_allocate_before_rebuild == 0) {
    rebuild_free_list();        // updates _allocate_before_rebuild counter
  } else {
    // Append new block
    Thread* thread = Thread::current();
    Handle obj_handle(thread, obj);
    // This can block, so we need to preserve obj across call.
    _last->_next = JNIHandleBlock::allocate_block(thread);
    _last = _last->_next;
    _allocate_before_rebuild--;
    obj = obj_handle();
  }
  return allocate_handle(obj);  // retry
}


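// Scan the (completely full) block chain and thread every slot holding the
// deleted_handle() sentinel onto the free list.  If too few slots are
// reclaimed, set _allocate_before_rebuild so that new blocks are appended
// before the next rebuild is attempted.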
void JNIHandleBlock::rebuild_free_list() {
  assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
  int free = 0;
  int blocks = 0;
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      oop* handle = &(current->_handles)[index];
      if (*handle == JNIHandles::deleted_handle()) {
        // this handle was cleared out by a delete call, reuse it
        *handle = (oop) _free_list;
        _free_list = handle;
        free++;
      }
    }
    // We should not rebuild the free list if there are unused handles at the end.
    assert(current->_top == block_size_in_oops, "just checking");
    blocks++;
  }
  // Heuristic: if more than half of the handles are free we rebuild next time
  // as well, otherwise we append a corresponding number of new blocks before
  // attempting a free list rebuild again.
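  // Worked example (illustrative numbers only): with block_size_in_oops = 32,
  // blocks = 4 and free = 40, total = 128 and extra = 128 - 80 = 48, so
  // _allocate_before_rebuild = (48 + 31) / 32 = 2 new blocks are appended
  // before the next rebuild; with free = 70, extra <= 0 and we rebuild again.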
  int total = blocks * block_size_in_oops;
  int extra = total - 2*free;
  if (extra > 0) {
    // Not as many free handles as we would like - compute number of new blocks to append
    _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
  }
  if (TraceJNIHandleAllocation) {
    tty->print_cr("Rebuild free list JNIHandleBlock " INTPTR_FORMAT " blocks=%d used=%d free=%d add=%d",
                  p2i(this), blocks, total-free, free, _allocate_before_rebuild);
  }
}


bool JNIHandleBlock::contains(jobject handle) const {
  return ((jobject)&_handles[0] <= handle && handle < (jobject)&_handles[_top]);
}


bool JNIHandleBlock::chain_contains(jobject handle) const {
  for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != NULL; current = current->_next) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}


int JNIHandleBlock::length() const {
  int result = 1;
  for (JNIHandleBlock* current = _next; current != NULL; current = current->_next) {
    result++;
  }
  return result;
}

const size_t JNIHandleBlock::get_number_of_live_handles() {
  CountHandleClosure counter;
  oops_do(&counter);
  return counter.count();
}

// This method is not thread-safe; it must be called while holding a lock on
// the structure.
long JNIHandleBlock::memory_usage() const {
  return length() * sizeof(JNIHandleBlock);
}


#ifndef PRODUCT

bool JNIHandleBlock::any_contains(jobject handle) {
  for (JNIHandleBlock* current = _block_list; current != NULL; current = current->_block_list_link) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}

void JNIHandleBlock::print_statistics() {
  int used_blocks = 0;
  int free_blocks = 0;
  int used_handles = 0;
  int free_handles = 0;
  JNIHandleBlock* block = _block_list;
  while (block != NULL) {
    if (block->_top > 0) {
      used_blocks++;
    } else {
      free_blocks++;
    }
    used_handles += block->_top;
    free_handles += (block_size_in_oops - block->_top);
    block = block->_block_list_link;
  }
  tty->print_cr("JNIHandleBlocks statistics");
  tty->print_cr("- blocks allocated: %d", used_blocks + free_blocks);
  tty->print_cr("- blocks in use:    %d", used_blocks);
  tty->print_cr("- blocks free:      %d", free_blocks);
  tty->print_cr("- handles in use:   %d", used_handles);
  tty->print_cr("- handles free:     %d", free_handles);
}

#endif