/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"

JNIHandleBlock* JNIHandles::_global_handles       = NULL;
JNIHandleBlock* JNIHandles::_weak_global_handles  = NULL;
oop             JNIHandles::_deleted_handle       = NULL;

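// Local handles are allocated in the current thread's active JNIHandleBlock
// chain and are valid only for the duration of the enclosing native frame;
// they are reclaimed wholesale when that frame (or an explicit local frame)
// is popped, so no per-handle deletion is required.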
jobject JNIHandles::make_local(oop obj) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    Thread* thread = Thread::current();
    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
    return thread->active_handles()->allocate_handle(obj);
  }
}


// Optimized versions that avoid calling Thread::current() when the caller
// already has the thread or a JNIEnv at hand.

jobject JNIHandles::make_local(Thread* thread, oop obj) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
    return thread->active_handles()->allocate_handle(obj);
  }
}


jobject JNIHandles::make_local(JNIEnv* env, oop obj) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    JavaThread* thread = JavaThread::thread_from_jni_environment(env);
    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
    return thread->active_handles()->allocate_handle(obj);
  }
}

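// Global handles are allocated from the shared _global_handles block chain
// under JNIGlobalHandle_lock and remain strong GC roots (traversed by
// JNIHandles::oops_do) until they are explicitly destroyed.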
jobject JNIHandles::make_global(Handle obj) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    MutexLocker ml(JNIGlobalHandle_lock);
    assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
    res = _global_handles->allocate_handle(obj());
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }

  return res;
}


jobject JNIHandles::make_weak_global(Handle obj) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    MutexLocker ml(JNIGlobalHandle_lock);
    assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
    res = _weak_global_handles->allocate_handle(obj());
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }
  return res;
}

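// Destroyed handles are not returned to a free list immediately. Instead the
// slot is overwritten with the _deleted_handle sentinel object; such slots are
// later collected into a free list by rebuild_free_list() and reused by
// allocate_handle().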
void JNIHandles::destroy_global(jobject handle) {
  if (handle != NULL) {
    assert(is_global_handle(handle), "Invalid delete of global JNI handle");
    *((oop*)handle) = deleted_handle(); // Mark the handle as deleted, allocate will reuse it
  }
}


void JNIHandles::destroy_weak_global(jobject handle) {
  if (handle != NULL) {
    assert(!CheckJNICalls || is_weak_global_handle(handle), "Invalid delete of weak global JNI handle");
    *((oop*)handle) = deleted_handle(); // Mark the handle as deleted, allocate will reuse it
  }
}


void JNIHandles::oops_do(OopClosure* f) {
  f->do_oop(&_deleted_handle);
  _global_handles->oops_do(f);
}


void JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
  _weak_global_handles->weak_oops_do(is_alive, f);
}

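// The _deleted_handle sentinel is a real java.lang.Object instance so that a
// destroyed handle slot still contains a valid oop; it is allocated here and
// kept alive explicitly by JNIHandles::oops_do above.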
void JNIHandles::initialize() {
  _global_handles      = JNIHandleBlock::allocate_block();
  _weak_global_handles = JNIHandleBlock::allocate_block();
  EXCEPTION_MARK;
  // We will never reach the CATCH below since Exceptions::_throw will cause
  // the VM to exit if an exception is thrown during initialization
  Klass* k      = SystemDictionary::Object_klass();
  _deleted_handle = InstanceKlass::cast(k)->allocate_instance(CATCH);
}


bool JNIHandles::is_local_handle(Thread* thread, jobject handle) {
  JNIHandleBlock* block = thread->active_handles();

  // Look back past possible native calls to jni_PushLocalFrame.
  while (block != NULL) {
    if (block->chain_contains(handle)) {
      return true;
    }
    block = block->pop_frame_link();
  }
  return false;
}

// Determine if the handle is somewhere in the current thread's stack.
// We can't easily isolate the particular stack frame the handle might
// have come from, so we check the whole stack.

bool JNIHandles::is_frame_handle(JavaThread* thr, jobject obj) {
  // If there is no Java frame, then this must be top-level code, such
  // as the java command executable, in which case this type of handle
  // is not permitted.
  return (thr->has_last_Java_frame() &&
         (void*)obj < (void*)thr->stack_base() &&
         (void*)obj >= (void*)thr->last_Java_sp());
}

bool JNIHandles::is_global_handle(jobject handle) {
  return _global_handles->chain_contains(handle);
}


bool JNIHandles::is_weak_global_handle(jobject handle) {
  return _weak_global_handles->chain_contains(handle);
}

long JNIHandles::global_handle_memory_usage() {
  return _global_handles->memory_usage();
}

long JNIHandles::weak_global_handle_memory_usage() {
  return _weak_global_handles->memory_usage();
}

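// Helper closures used by print_on() and verify() below: AlwaysAliveClosure
// lets weak_oops_do visit every weak handle regardless of liveness, and
// CountHandleClosure counts handles that do not hold the deleted sentinel.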
class AlwaysAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop obj) { return true; }
};

class CountHandleClosure: public OopClosure {
private:
  int _count;
public:
  CountHandleClosure(): _count(0) {}
  virtual void do_oop(oop* ooph) {
    if (*ooph != JNIHandles::deleted_handle()) {
      _count++;
    }
  }
  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
  int count() { return _count; }
};

// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  assert(_global_handles != NULL && _weak_global_handles != NULL,
         "JNIHandles not initialized");

  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  oops_do(&global_handle_count);
  weak_oops_do(&always_alive, &global_handle_count);

  st->print_cr("JNI global references: %d", global_handle_count.count());
  st->cr();
  st->flush();
}

class VerifyHandleClosure: public OopClosure {
public:
  virtual void do_oop(oop* root) {
    (*root)->verify();
  }
  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};

void JNIHandles::verify() {
  VerifyHandleClosure verify_handle;
  AlwaysAliveClosure always_alive;

  oops_do(&verify_handle);
  weak_oops_do(&always_alive, &verify_handle);
}



void jni_handles_init() {
  JNIHandles::initialize();
}


int             JNIHandleBlock::_blocks_allocated     = 0;
JNIHandleBlock* JNIHandleBlock::_block_free_list      = NULL;
#ifndef PRODUCT
JNIHandleBlock* JNIHandleBlock::_block_list           = NULL;
#endif


void JNIHandleBlock::zap() {
  // Zap block values
  _top  = 0;
  for (int index = 0; index < block_size_in_oops; index++) {
    _handles[index] = badJNIHandle;
  }
}

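// Allocation of a new block first tries the calling thread's private cache of
// released blocks (no locking), and only then falls back to the global
// _block_free_list, which is protected by JNIHandleBlockFreeList_lock taken
// without a safepoint check (see the deadlock note below).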
JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != NULL && thread->free_handle_block() != NULL) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  }
  else {
    // locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLockerEx ml(JNIHandleBlockFreeList_lock,
                     Mutex::_no_safepoint_check_flag);
    if (_block_free_list == NULL) {
      // Allocate new block
      block = new JNIHandleBlock();
      _blocks_allocated++;
      if (TraceJNIHandleAllocation) {
        tty->print_cr("JNIHandleBlock " INTPTR_FORMAT " allocated (%d total blocks)",
                      p2i(block), _blocks_allocated);
      }
      if (ZapJNIHandleArea) block->zap();
      #ifndef PRODUCT
      // Link new block to list of all allocated blocks
      block->_block_list_link = _block_list;
      _block_list = block;
      #endif
    } else {
      // Get block from free list
      block = _block_free_list;
      _block_free_list = _block_free_list->_next;
    }
  }
  block->_top  = 0;
  block->_next = NULL;
  block->_pop_frame_link = NULL;
  block->_planned_capacity = block_size_in_oops;
  // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
  debug_only(block->_last = NULL);
  debug_only(block->_free_list = NULL);
  debug_only(block->_allocate_before_rebuild = -1);
  return block;
}

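// Releasing a block chain mirrors allocation: if a thread is supplied, the
// chain is cached on that thread's free_handle_block list; otherwise it is
// returned to the global _block_free_list under JNIHandleBlockFreeList_lock.
// Any chain still attached through the pop frame link is released as well.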
void JNIHandleBlock::release_block(JNIHandleBlock* block, Thread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put returned block at the beginning of the thread-local free list.
  // Note that a NULL thread is used as an implicit argument that the block
  // should _not_ be kept on the thread's free_handle_block list.
  // See for instance JavaThread::exit().
  if (thread != NULL) {
    if (ZapJNIHandleArea) block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = NULL;
    thread->set_free_handle_block(block);

    // Add original freelist to end of chain
    if (freelist != NULL) {
      while (block->_next != NULL) block = block->_next;
      block->_next = freelist;
    }
    block = NULL;
  }
  if (block != NULL) {
    // Return blocks to free list
    // locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLockerEx ml(JNIHandleBlockFreeList_lock,
                     Mutex::_no_safepoint_check_flag);
    while (block != NULL) {
      if (ZapJNIHandleArea) block->zap();
      JNIHandleBlock* next = block->_next;
      block->_next = _block_free_list;
      _block_free_list = block;
      block = next;
    }
  }
  if (pop_frame_link != NULL) {
    // As a sanity check we release any blocks reachable through the
    // pop_frame_link. It should always be NULL here; it is only left set
    // if PopLocalFrame was not called the correct number of times.
    release_block(pop_frame_link, thread);
  }
}

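// Traverses all handles in this chain (and in any chains reachable through
// pop frame links) and applies the closure to slots that hold real heap oops;
// NULL slots and internal free-list links are skipped. Only handles below
// each block's _top are live, and a partially filled block ends the walk of
// its chain.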
void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != NULL) {
    for (JNIHandleBlock* current = current_chain; current != NULL;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == NULL,
        "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        oop* root = &(current->_handles)[index];
        oop value = *root;
        // traverse heap pointers only, not deleted handles or free list
        // pointers
        if (value != NULL && Universe::heap()->is_in_reserved(value)) {
          f->do_oop(root);
        }
      }
      // the next handle block is valid only if current block is full
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}

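// Like oops_do, but each referent is first tested with the is_alive closure:
// live referents are passed to f (which may update the slot), while dead
// referents are cleared by storing NULL. JVMTI weak oops are processed here
// as well so that individual collectors do not have to handle them.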
void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
                                  OopClosure* f) {
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    assert(current->pop_frame_link() == NULL,
      "blocks holding weak global JNI handles should not have pop frame link set");
    for (int index = 0; index < current->_top; index++) {
      oop* root = &(current->_handles)[index];
      oop value = *root;
      // traverse heap pointers only, not deleted handles or free list pointers
      if (value != NULL && Universe::heap()->is_in_reserved(value)) {
        if (is_alive->do_object_b(value)) {
          // The weakly referenced object is alive, update pointer
          f->do_oop(root);
        } else {
          // The weakly referenced object is not alive, clear the reference by storing NULL
          log_develop(gc, ref)("Clearing JNI weak reference (" INTPTR_FORMAT ")", p2i(root));
          *root = NULL;
        }
      }
    }
    // the next handle block is valid only if current block is full
    if (current->_top < block_size_in_oops) {
      break;
    }
  }

  /*
   * JVMTI data structures may also contain weak oops.  The iteration of them
   * is placed here so that we don't need to add it to each of the collectors.
   */
  JvmtiExport::weak_oops_do(is_alive, f);
}

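// Handle allocation tries, in order: the next unused slot in the last block
// of the chain, a slot from the free list of previously deleted handles, an
// already linked but unused block, and finally either a free-list rebuild or
// the appending of a freshly allocated block (see rebuild_free_list for the
// heuristic that chooses between the two).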
jobject JNIHandleBlock::allocate_handle(oop obj) {
  assert(Universe::heap()->is_in_reserved(obj), "sanity check");
  if (_top == 0) {
    // This is the first allocation, or the initial block got zapped when
    // entering a native function. If there are any following blocks, they
    // are no longer valid.
    for (JNIHandleBlock* current = _next; current != NULL;
         current = current->_next) {
      assert(current->_last == NULL, "only first block should have _last set");
      assert(current->_free_list == NULL,
             "only first block should have _free_list set");
      current->_top = 0;
      if (ZapJNIHandleArea) current->zap();
    }
    // Clear initial block
    _free_list = NULL;
    _allocate_before_rebuild = 0;
    _last = this;
    if (ZapJNIHandleArea) zap();
  }

  // Try last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = &(_last->_handles)[_last->_top++];
    *handle = obj;
    return (jobject) handle;
  }

  // Try free list
  if (_free_list != NULL) {
    oop* handle = _free_list;
    _free_list = (oop*) *_free_list;
    *handle = obj;
    return (jobject) handle;
  }
  // Check if an unused block follows the last block
  if (_last->_next != NULL) {
    // update last and retry
    _last = _last->_next;
    return allocate_handle(obj);
  }

  // No space available, we have to rebuild free list or expand
  if (_allocate_before_rebuild == 0) {
    rebuild_free_list();        // updates _allocate_before_rebuild counter
  } else {
    // Append new block
    Thread* thread = Thread::current();
    Handle obj_handle(thread, obj);
    // This can block, so we need to preserve obj across the call.
    _last->_next = JNIHandleBlock::allocate_block(thread);
    _last = _last->_next;
    _allocate_before_rebuild--;
    obj = obj_handle();
  }
  return allocate_handle(obj);  // retry
}

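// Rebuilds the free list by linking together all slots that hold the deleted
// sentinel. Heuristic: a pure rebuild is worthwhile only if more than half of
// the handles in the chain are free; otherwise _allocate_before_rebuild is set
// so that enough new blocks are appended to cover the shortfall before a
// rebuild is attempted again. For example, if block_size_in_oops were 32 and a
// chain of 128 handles had only 20 free, extra = 128 - 2*20 = 88, so
// ceil(88/32) = 3 new blocks would be appended first.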
void JNIHandleBlock::rebuild_free_list() {
  assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
  int free = 0;
  int blocks = 0;
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      oop* handle = &(current->_handles)[index];
      if (*handle == JNIHandles::deleted_handle()) {
        // this handle was cleared out by a delete call, reuse it
        *handle = (oop) _free_list;
        _free_list = handle;
        free++;
      }
    }
    // we should not rebuild the free list if there are unused handles at the end
    assert(current->_top == block_size_in_oops, "just checking");
    blocks++;
  }
  // Heuristic: if more than half of the handles are free we rebuild next time
  // as well, otherwise we append a corresponding number of new blocks before
  // attempting a free list rebuild again.
  int total = blocks * block_size_in_oops;
  int extra = total - 2*free;
  if (extra > 0) {
    // Not as many free handles as we would like - compute number of new blocks to append
    _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
  }
  if (TraceJNIHandleAllocation) {
    tty->print_cr("Rebuild free list JNIHandleBlock " INTPTR_FORMAT " blocks=%d used=%d free=%d add=%d",
                  p2i(this), blocks, total-free, free, _allocate_before_rebuild);
  }
}


bool JNIHandleBlock::contains(jobject handle) const {
  return ((jobject)&_handles[0] <= handle && handle < (jobject)&_handles[_top]);
}


bool JNIHandleBlock::chain_contains(jobject handle) const {
  for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != NULL; current = current->_next) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}


int JNIHandleBlock::length() const {
  int result = 1;
  for (JNIHandleBlock* current = _next; current != NULL; current = current->_next) {
    result++;
  }
  return result;
}

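// Counts the handles in this block chain that refer to live objects; NULL
// slots, free-list links and deleted-handle sentinels are not counted.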
const size_t JNIHandleBlock::get_number_of_live_handles() {
  CountHandleClosure counter;
  oops_do(&counter);
  return counter.count();
}

// This method is not thread-safe; it must be called while holding a lock on
// the structure.
long JNIHandleBlock::memory_usage() const {
  return length() * sizeof(JNIHandleBlock);
}


#ifndef PRODUCT

bool JNIHandleBlock::any_contains(jobject handle) {
  for (JNIHandleBlock* current = _block_list; current != NULL; current = current->_block_list_link) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}

void JNIHandleBlock::print_statistics() {
  int used_blocks = 0;
  int free_blocks = 0;
  int used_handles = 0;
  int free_handles = 0;
  JNIHandleBlock* block = _block_list;
  while (block != NULL) {
    if (block->_top > 0) {
      used_blocks++;
    } else {
      free_blocks++;
    }
    used_handles += block->_top;
    free_handles += (block_size_in_oops - block->_top);
    block = block->_block_list_link;
  }
  tty->print_cr("JNIHandleBlocks statistics");
  tty->print_cr("- blocks allocated: %d", used_blocks + free_blocks);
  tty->print_cr("- blocks in use:    %d", used_blocks);
  tty->print_cr("- blocks free:      %d", free_blocks);
  tty->print_cr("- handles in use:   %d", used_handles);
  tty->print_cr("- handles free:     %d", free_handles);
}

#endif