/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "interpreter/interpreter.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "prims/jvmtiAgentThread.hpp"
#include "prims/jvmtiEventController.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/exceptions.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif

//
// class JvmtiAgentThread
//
// JavaThread used to wrap a thread started by an agent
// using the JVMTI method RunAgentThread.
//
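// For context, an agent typically reaches this path by calling the JVMTI
// RunAgentThread function on a java.lang.Thread object it created via JNI.
// A minimal agent-side sketch (hypothetical names, error handling omitted):
//
//   static void JNICALL my_worker(jvmtiEnv* jvmti, JNIEnv* jni, void* arg) {
//     // runs in the new JvmtiAgentThread created below
//   }
//
//   jclass thread_class = jni->FindClass("java/lang/Thread");
//   jmethodID ctor      = jni->GetMethodID(thread_class, "<init>", "()V");
//   jthread thread      = jni->NewObject(thread_class, ctor);
//   jvmti->RunAgentThread(thread, my_worker, NULL, JVMTI_THREAD_NORM_PRIORITY);
//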

JvmtiAgentThread::JvmtiAgentThread(JvmtiEnv* env, jvmtiStartFunction start_fn, const void *start_arg)
    : JavaThread(start_function_wrapper) {
    _env = env;
    _start_fn = start_fn;
    _start_arg = start_arg;
}

void
JvmtiAgentThread::start_function_wrapper(JavaThread *thread, TRAPS) {
    // It is expected that any Agent threads will be created as
    // Java Threads.  If this is the case, notification of the creation
    // of the thread is given in JavaThread::thread_main().
    assert(thread->is_Java_thread(), "debugger thread should be a Java Thread");
    assert(thread == JavaThread::current(), "sanity check");

    JvmtiAgentThread *dthread = (JvmtiAgentThread *)thread;
    dthread->call_start_function();
}

void
JvmtiAgentThread::call_start_function() {
    ThreadToNativeFromVM transition(this);
    _start_fn(_env->jvmti_external(), jni_environment(), (void*)_start_arg);
}


//
// class GrowableCache - private methods
//

void GrowableCache::recache() {
  int len = _elements->length();

  FREE_C_HEAP_ARRAY(address, _cache);
  _cache = NEW_C_HEAP_ARRAY(address,len+1);

  for (int i=0; i<len; i++) {
    _cache[i] = _elements->at(i)->getCacheValue();
    //
    // The cache entry has gone bad. Without a valid frame pointer
    // value, the entry is useless so we simply delete it in product
    // mode. The call to remove() will rebuild the cache again
    // without the bad entry.
    //
    if (_cache[i] == NULL) {
      assert(false, "cannot recache NULL elements");
      remove(i);
      return;
    }
  }
  _cache[len] = NULL;

  _listener_fun(_this_obj,_cache);
}

bool GrowableCache::equals(void* v, GrowableElement *e2) {
  GrowableElement *e1 = (GrowableElement *) v;
  assert(e1 != NULL, "e1 != NULL");
  assert(e2 != NULL, "e2 != NULL");

  return e1->equals(e2);
}

//
// class GrowableCache - public methods
//

GrowableCache::GrowableCache() {
  _this_obj       = NULL;
  _listener_fun   = NULL;
  _elements       = NULL;
  _cache          = NULL;
}

GrowableCache::~GrowableCache() {
  clear();
  delete _elements;
  FREE_C_HEAP_ARRAY(address, _cache);
}

void GrowableCache::initialize(void *this_obj, void listener_fun(void *, address*) ) {
  _this_obj       = this_obj;
  _listener_fun   = listener_fun;
  _elements       = new (ResourceObj::C_HEAP) GrowableArray<GrowableElement*>(5,true);
  recache();
}

// number of elements in the collection
int GrowableCache::length() {
  return _elements->length();
}

// get the value of the index element in the collection
GrowableElement* GrowableCache::at(int index) {
  GrowableElement *e = (GrowableElement *) _elements->at(index);
  assert(e != NULL, "e != NULL");
  return e;
}

int GrowableCache::find(GrowableElement* e) {
  return _elements->find(e, GrowableCache::equals);
}

// append a copy of the element to the end of the collection
void GrowableCache::append(GrowableElement* e) {
  GrowableElement *new_e = e->clone();
  _elements->append(new_e);
  recache();
}

// insert a copy of the element using lessthan()
void GrowableCache::insert(GrowableElement* e) {
  GrowableElement *new_e = e->clone();
  _elements->append(new_e);

  int n = length()-2;
  for (int i=n; i>=0; i--) {
    GrowableElement *e1 = _elements->at(i);
    GrowableElement *e2 = _elements->at(i+1);
    if (e2->lessThan(e1)) {
      _elements->at_put(i+1, e1);
      _elements->at_put(i,   e2);
    }
  }

  recache();
}

// remove the element at index
void GrowableCache::remove (int index) {
  GrowableElement *e = _elements->at(index);
  assert(e != NULL, "e != NULL");
  _elements->remove(e);
  delete e;
  recache();
}

// clear out all elements, release all heap space and
// let our listener know that things have changed.
void GrowableCache::clear() {
  int len = _elements->length();
  for (int i=0; i<len; i++) {
    delete _elements->at(i);
  }
  _elements->clear();
  recache();
}

void GrowableCache::oops_do(OopClosure* f) {
  int len = _elements->length();
  for (int i=0; i<len; i++) {
    GrowableElement *e = _elements->at(i);
    e->oops_do(f);
  }
}

void GrowableCache::gc_epilogue() {
  int len = _elements->length();
  for (int i=0; i<len; i++) {
    _cache[i] = _elements->at(i)->getCacheValue();
  }
}

//
// class JvmtiBreakpoint
//

JvmtiBreakpoint::JvmtiBreakpoint() {
  _method = NULL;
  _bci    = 0;
#ifdef CHECK_UNHANDLED_OOPS
  // This one is always allocated with new, but check it just in case.
  Thread *thread = Thread::current();
  if (thread->is_in_stack((address)&_method)) {
    thread->allow_unhandled_oop((oop*)&_method);
  }
#endif // CHECK_UNHANDLED_OOPS
}

JvmtiBreakpoint::JvmtiBreakpoint(methodOop m_method, jlocation location) {
  _method        = m_method;
  assert(_method != NULL, "_method != NULL");
  _bci           = (int) location;
#ifdef CHECK_UNHANDLED_OOPS
  // Could be allocated with new and wouldn't be on the unhandled oop list.
  Thread *thread = Thread::current();
  if (thread->is_in_stack((address)&_method)) {
    thread->allow_unhandled_oop(&_method);
  }
#endif // CHECK_UNHANDLED_OOPS

  assert(_bci >= 0, "_bci >= 0");
}

void JvmtiBreakpoint::copy(JvmtiBreakpoint& bp) {
  _method   = bp._method;
  _bci      = bp._bci;
}

bool JvmtiBreakpoint::lessThan(JvmtiBreakpoint& bp) {
  Unimplemented();
  return false;
}

bool JvmtiBreakpoint::equals(JvmtiBreakpoint& bp) {
  return _method   == bp._method
    &&   _bci      == bp._bci;
}

bool JvmtiBreakpoint::is_valid() {
  return _method != NULL &&
         _bci >= 0;
}

address JvmtiBreakpoint::getBcp() {
  return _method->bcp_from(_bci);
}

void JvmtiBreakpoint::each_method_version_do(method_action meth_act) {
  ((methodOopDesc*)_method->*meth_act)(_bci);

  // add/remove breakpoint to/from versions of the method that
  // are EMCP. Directly or transitively obsolete methods are
  // not saved in the PreviousVersionInfo.
  Thread *thread = Thread::current();
  instanceKlassHandle ikh = instanceKlassHandle(thread, _method->method_holder());
  symbolOop m_name = _method->name();
  symbolOop m_signature = _method->signature();

  {
    ResourceMark rm(thread);
    // PreviousVersionInfo objects returned via PreviousVersionWalker
    // contain a GrowableArray of handles. We have to clean up the
    // GrowableArray _after_ the PreviousVersionWalker destructor
    // has destroyed the handles.
    {
      // search previous versions if they exist
      PreviousVersionWalker pvw((instanceKlass *)ikh()->klass_part());
      for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
           pv_info != NULL; pv_info = pvw.next_previous_version()) {
        GrowableArray<methodHandle>* methods =
          pv_info->prev_EMCP_method_handles();

        if (methods == NULL) {
          // We have run into a PreviousVersion generation where
          // all methods were made obsolete during that generation's
          // RedefineClasses() operation. At the time of that
          // operation, all EMCP methods were flushed so we don't
          // have to go back any further.
          //
          // A NULL methods array is different than an empty methods
          // array. We cannot infer any optimizations about older
          // generations from an empty methods array for the current
          // generation.
          break;
        }

        for (int i = methods->length() - 1; i >= 0; i--) {
          methodHandle method = methods->at(i);
          if (method->name() == m_name && method->signature() == m_signature) {
            RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
              meth_act == &methodOopDesc::set_breakpoint ? "sett" : "clear",
              method->name()->as_C_string(),
              method->signature()->as_C_string()));
            assert(!method->is_obsolete(), "only EMCP methods here");

            ((methodOopDesc*)method()->*meth_act)(_bci);
            break;
          }
        }
      }
    } // pvw is cleaned up
  } // rm is cleaned up
}

void JvmtiBreakpoint::set() {
  each_method_version_do(&methodOopDesc::set_breakpoint);
}

void JvmtiBreakpoint::clear() {
  each_method_version_do(&methodOopDesc::clear_breakpoint);
}

void JvmtiBreakpoint::print() {
#ifndef PRODUCT
  const char *class_name  = (_method == NULL) ? "NULL" : _method->klass_name()->as_C_string();
  const char *method_name = (_method == NULL) ? "NULL" : _method->name()->as_C_string();

  tty->print("Breakpoint(%s,%s,%d,%p)",class_name, method_name, _bci, getBcp());
#endif
}


//
// class VM_ChangeBreakpoints
//
// Modify the Breakpoints data structure at a safepoint
//

void VM_ChangeBreakpoints::doit() {
  switch (_operation) {
  case SET_BREAKPOINT:
    _breakpoints->set_at_safepoint(*_bp);
    break;
  case CLEAR_BREAKPOINT:
    _breakpoints->clear_at_safepoint(*_bp);
    break;
  case CLEAR_ALL_BREAKPOINT:
    _breakpoints->clearall_at_safepoint();
    break;
  default:
    assert(false, "Unknown operation");
  }
}

void VM_ChangeBreakpoints::oops_do(OopClosure* f) {
  // This operation keeps breakpoints alive
  if (_breakpoints != NULL) {
    _breakpoints->oops_do(f);
  }
  if (_bp != NULL) {
    _bp->oops_do(f);
  }
}

//
// class JvmtiBreakpoints
//
// a JVMTI internal collection of JvmtiBreakpoint
//
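// Entries are added and removed here in response to the JVMTI SetBreakpoint
// and ClearBreakpoint functions. A hedged agent-side sketch of what ends up
// driving this collection (hypothetical method and location, no error checks):
//
//   jmethodID mid = jni->GetMethodID(clazz, "run", "()V");
//   jvmti->SetBreakpoint(mid, (jlocation)0);   // eventually reaches JvmtiBreakpoints::set()
//   jvmti->ClearBreakpoint(mid, (jlocation)0); // eventually reaches JvmtiBreakpoints::clear()
//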

JvmtiBreakpoints::JvmtiBreakpoints(void listener_fun(void *,address *)) {
  _bps.initialize(this,listener_fun);
}

JvmtiBreakpoints:: ~JvmtiBreakpoints() {}

void  JvmtiBreakpoints::oops_do(OopClosure* f) {
  _bps.oops_do(f);
}

void JvmtiBreakpoints::gc_epilogue() {
  _bps.gc_epilogue();
}

void  JvmtiBreakpoints::print() {
#ifndef PRODUCT
  ResourceMark rm;

  int n = _bps.length();
  for (int i=0; i<n; i++) {
    JvmtiBreakpoint& bp = _bps.at(i);
    tty->print("%d: ", i);
    bp.print();
    tty->print_cr("");
  }
#endif
}


void JvmtiBreakpoints::set_at_safepoint(JvmtiBreakpoint& bp) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  int i = _bps.find(bp);
  if (i == -1) {
    _bps.append(bp);
    bp.set();
  }
}

void JvmtiBreakpoints::clear_at_safepoint(JvmtiBreakpoint& bp) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  int i = _bps.find(bp);
  if (i != -1) {
    _bps.remove(i);
    bp.clear();
  }
}

void JvmtiBreakpoints::clearall_at_safepoint() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  int len = _bps.length();
  for (int i=0; i<len; i++) {
    _bps.at(i).clear();
  }
  _bps.clear();
}

int JvmtiBreakpoints::length() { return _bps.length(); }

int JvmtiBreakpoints::set(JvmtiBreakpoint& bp) {
  if ( _bps.find(bp) != -1) {
     return JVMTI_ERROR_DUPLICATE;
  }
  VM_ChangeBreakpoints set_breakpoint(this,VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
  VMThread::execute(&set_breakpoint);
  return JVMTI_ERROR_NONE;
}

int JvmtiBreakpoints::clear(JvmtiBreakpoint& bp) {
  if ( _bps.find(bp) == -1) {
     return JVMTI_ERROR_NOT_FOUND;
  }

  VM_ChangeBreakpoints clear_breakpoint(this,VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
  VMThread::execute(&clear_breakpoint);
  return JVMTI_ERROR_NONE;
}

void JvmtiBreakpoints::clearall_in_class_at_safepoint(klassOop klass) {
  bool changed = true;
  // We are going to run thru the list of bkpts
  // and delete some.  This deletion probably alters
  // the list in some implementation defined way such
  // that when we delete entry i, the next entry might
  // no longer be at i+1.  To be safe, each time we delete
  // an entry, we'll just start again from the beginning.
  // We'll stop when we make a pass thru the whole list without
  // deleting anything.
  while (changed) {
    int len = _bps.length();
    changed = false;
    for (int i = 0; i < len; i++) {
      JvmtiBreakpoint& bp = _bps.at(i);
      if (bp.method()->method_holder() == klass) {
        bp.clear();
        _bps.remove(i);
        // This changed 'i' so we have to start over.
        changed = true;
        break;
      }
    }
  }
}

void JvmtiBreakpoints::clearall() {
  VM_ChangeBreakpoints clearall_breakpoint(this,VM_ChangeBreakpoints::CLEAR_ALL_BREAKPOINT);
  VMThread::execute(&clearall_breakpoint);
}

//
// class JvmtiCurrentBreakpoints
//

JvmtiBreakpoints *JvmtiCurrentBreakpoints::_jvmti_breakpoints  = NULL;
address *         JvmtiCurrentBreakpoints::_breakpoint_list    = NULL;


JvmtiBreakpoints& JvmtiCurrentBreakpoints::get_jvmti_breakpoints() {
  if (_jvmti_breakpoints != NULL) return (*_jvmti_breakpoints);
  _jvmti_breakpoints = new JvmtiBreakpoints(listener_fun);
  assert(_jvmti_breakpoints != NULL, "_jvmti_breakpoints != NULL");
  return (*_jvmti_breakpoints);
}

void  JvmtiCurrentBreakpoints::listener_fun(void *this_obj, address *cache) {
  JvmtiBreakpoints *this_jvmti = (JvmtiBreakpoints *) this_obj;
  assert(this_jvmti != NULL, "this_jvmti != NULL");

  debug_only(int n = this_jvmti->length(););
  assert(cache[n] == NULL, "cache must be NULL terminated");

  set_breakpoint_list(cache);
}


void JvmtiCurrentBreakpoints::oops_do(OopClosure* f) {
  if (_jvmti_breakpoints != NULL) {
    _jvmti_breakpoints->oops_do(f);
  }
}

void JvmtiCurrentBreakpoints::gc_epilogue() {
  if (_jvmti_breakpoints != NULL) {
    _jvmti_breakpoints->gc_epilogue();
  }
}

///////////////////////////////////////////////////////////////
//
// class VM_GetOrSetLocal
//
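// These VM operations back the JVMTI GetLocal*/SetLocal* functions. For
// context, an agent call like the following (hypothetical thread, depth and
// slot values) ends up constructing a VM_GetOrSetLocal and running it on the
// VM thread via VMThread::execute():
//
//   jint value;
//   jvmti->GetLocalInt(thread, 0 /* depth */, 1 /* slot */, &value);
//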

// Constructor for non-object getter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, int index, BasicType type)
  : _thread(thread)
  , _calling_thread(NULL)
  , _depth(depth)
  , _index(index)
  , _type(type)
  , _set(false)
  , _jvf(NULL)
  , _result(JVMTI_ERROR_NONE)
{
}

// Constructor for object or non-object setter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, int index, BasicType type, jvalue value)
  : _thread(thread)
  , _calling_thread(NULL)
  , _depth(depth)
  , _index(index)
  , _type(type)
  , _value(value)
  , _set(true)
  , _jvf(NULL)
  , _result(JVMTI_ERROR_NONE)
{
}

// Constructor for object getter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, JavaThread* calling_thread, jint depth, int index)
  : _thread(thread)
  , _calling_thread(calling_thread)
  , _depth(depth)
  , _index(index)
  , _type(T_OBJECT)
  , _set(false)
  , _jvf(NULL)
  , _result(JVMTI_ERROR_NONE)
{
}

vframe *VM_GetOrSetLocal::get_vframe() {
  if (!_thread->has_last_Java_frame()) {
    return NULL;
  }
  RegisterMap reg_map(_thread);
  vframe *vf = _thread->last_java_vframe(&reg_map);
  int d = 0;
  while ((vf != NULL) && (d < _depth)) {
    vf = vf->java_sender();
    d++;
  }
  return vf;
}

javaVFrame *VM_GetOrSetLocal::get_java_vframe() {
  vframe* vf = get_vframe();
  if (vf == NULL) {
    _result = JVMTI_ERROR_NO_MORE_FRAMES;
    return NULL;
  }
  javaVFrame *jvf = (javaVFrame*)vf;

  if (!vf->is_java_frame()) {
    _result = JVMTI_ERROR_OPAQUE_FRAME;
    return NULL;
  }
  return jvf;
}

// Check that the klass is assignable to a type with the given signature.
// Another solution could be to use the function Klass::is_subtype_of(type).
// But the type class can be forced to load/initialize eagerly in such a case.
// This may cause unexpected consequences like CFLH or class-init JVMTI events.
// It is better to avoid such a behavior.
bool VM_GetOrSetLocal::is_assignable(const char* ty_sign, Klass* klass, Thread* thread) {
  assert(ty_sign != NULL, "type signature must not be NULL");
  assert(thread != NULL, "thread must not be NULL");
  assert(klass != NULL, "klass must not be NULL");

  int len = (int) strlen(ty_sign);
  if (ty_sign[0] == 'L' && ty_sign[len-1] == ';') { // Need pure class/interface name
    ty_sign++;
    len -= 2;
  }
  symbolHandle ty_sym = oopFactory::new_symbol_handle(ty_sign, len, thread);
  if (klass->name() == ty_sym()) {
    return true;
  }
  // Compare primary supers
  int super_depth = klass->super_depth();
  int idx;
  for (idx = 0; idx < super_depth; idx++) {
    if (Klass::cast(klass->primary_super_of_depth(idx))->name() == ty_sym()) {
      return true;
    }
  }
  // Compare secondary supers
  objArrayOop sec_supers = klass->secondary_supers();
  for (idx = 0; idx < sec_supers->length(); idx++) {
    if (Klass::cast((klassOop) sec_supers->obj_at(idx))->name() == ty_sym()) {
      return true;
    }
  }
  return false;
}

// Checks error conditions:
//   JVMTI_ERROR_INVALID_SLOT
//   JVMTI_ERROR_TYPE_MISMATCH
// Returns: 'true' - everything is Ok, 'false' - an error code has been set in _result

bool VM_GetOrSetLocal::check_slot_type(javaVFrame* jvf) {
  methodOop method_oop = jvf->method();
  if (!method_oop->has_localvariable_table()) {
    // Just to check index boundaries
    jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0;
    if (_index < 0 || _index + extra_slot >= method_oop->max_locals()) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return false;
    }
    return true;
  }

  jint num_entries = method_oop->localvariable_table_length();
  if (num_entries == 0) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;       // There are no slots
  }
  int signature_idx = -1;
  int vf_bci = jvf->bci();
  LocalVariableTableElement* table = method_oop->localvariable_table_start();
  for (int i = 0; i < num_entries; i++) {
    int start_bci = table[i].start_bci;
    int end_bci = start_bci + table[i].length;

    // Here we assume that locations of LVT entries
    // with the same slot number cannot overlap
    if (_index == (jint) table[i].slot && start_bci <= vf_bci && vf_bci <= end_bci) {
      signature_idx = (int) table[i].descriptor_cp_index;
      break;
    }
  }
  if (signature_idx == -1) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;       // Incorrect slot index
  }
  symbolOop   sign_sym  = method_oop->constants()->symbol_at(signature_idx);
  const char* signature = (const char *) sign_sym->as_utf8();
  BasicType slot_type = char2type(signature[0]);

  switch (slot_type) {
  case T_BYTE:
  case T_SHORT:
  case T_CHAR:
  case T_BOOLEAN:
    slot_type = T_INT;
    break;
  case T_ARRAY:
    slot_type = T_OBJECT;
    break;
  };
  if (_type != slot_type) {
    _result = JVMTI_ERROR_TYPE_MISMATCH;
    return false;
  }

  jobject jobj = _value.l;
  if (_set && slot_type == T_OBJECT && jobj != NULL) { // NULL reference is allowed
    // Check that the jobject class matches the return type signature.
    JavaThread* cur_thread = JavaThread::current();
    HandleMark hm(cur_thread);

    Handle obj = Handle(cur_thread, JNIHandles::resolve_external_guard(jobj));
    NULL_CHECK(obj, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
    KlassHandle ob_kh = KlassHandle(cur_thread, obj->klass());
    NULL_CHECK(ob_kh, (_result = JVMTI_ERROR_INVALID_OBJECT, false));

    if (!is_assignable(signature, Klass::cast(ob_kh()), cur_thread)) {
      _result = JVMTI_ERROR_TYPE_MISMATCH;
      return false;
    }
  }
  return true;
}

static bool can_be_deoptimized(vframe* vf) {
  return (vf->is_compiled_frame() && vf->fr().can_be_deoptimized());
}

bool VM_GetOrSetLocal::doit_prologue() {
  _jvf = get_java_vframe();
  NULL_CHECK(_jvf, false);

  if (_jvf->method()->is_native()) {
    if (getting_receiver() && !_jvf->method()->is_static()) {
      return true;
    } else {
      _result = JVMTI_ERROR_OPAQUE_FRAME;
      return false;
    }
  }

  if (!check_slot_type(_jvf)) {
    return false;
  }
  return true;
}

void VM_GetOrSetLocal::doit() {
  if (_set) {
    // Force deoptimization of frame if compiled because it's
    // possible the compiler emitted some locals as constant values,
    // meaning they are not mutable.
    if (can_be_deoptimized(_jvf)) {

      // Schedule deoptimization so that eventually the local
      // update will be written to an interpreter frame.
      Deoptimization::deoptimize_frame(_jvf->thread(), _jvf->fr().id());

      // Now store a new value for the local which will be applied
      // once deoptimization occurs. Note however that while this
      // write is deferred until deoptimization actually happens,
      // any vframe created after this point will have its locals
      // reflecting this update, so as far as anyone can see the
      // write has already taken place.

      // If we are updating an oop then get the oop from the handle
      // since the handle will be long gone by the time the deopt
      // happens. The oop stored in the deferred local will be
      // gc'd on its own.
      if (_type == T_OBJECT) {
        _value.l = (jobject) (JNIHandles::resolve_external_guard(_value.l));
      }
      // Re-read the vframe so we can see that it is deoptimized
      // [ Only need because of assert in update_local() ]
      _jvf = get_java_vframe();
      ((compiledVFrame*)_jvf)->update_local(_type, _index, _value);
      return;
    }
    StackValueCollection *locals = _jvf->locals();
    HandleMark hm;

    switch (_type) {
      case T_INT:    locals->set_int_at   (_index, _value.i); break;
      case T_LONG:   locals->set_long_at  (_index, _value.j); break;
      case T_FLOAT:  locals->set_float_at (_index, _value.f); break;
      case T_DOUBLE: locals->set_double_at(_index, _value.d); break;
      case T_OBJECT: {
        Handle ob_h(JNIHandles::resolve_external_guard(_value.l));
        locals->set_obj_at (_index, ob_h);
        break;
      }
      default: ShouldNotReachHere();
    }
    _jvf->set_locals(locals);
  } else {
    if (_jvf->method()->is_native() && _jvf->is_compiled_frame()) {
      assert(getting_receiver(), "Can only get here when getting receiver");
      oop receiver = _jvf->fr().get_native_receiver();
      _value.l = JNIHandles::make_local(_calling_thread, receiver);
    } else {
      StackValueCollection *locals = _jvf->locals();

      if (locals->at(_index)->type() == T_CONFLICT) {
        memset(&_value, 0, sizeof(_value));
        _value.l = NULL;
        return;
      }

      switch (_type) {
        case T_INT:    _value.i = locals->int_at   (_index);   break;
        case T_LONG:   _value.j = locals->long_at  (_index);   break;
        case T_FLOAT:  _value.f = locals->float_at (_index);   break;
        case T_DOUBLE: _value.d = locals->double_at(_index);   break;
        case T_OBJECT: {
          // Wrap the oop to be returned in a local JNI handle since
          // oops_do() no longer applies after doit() is finished.
          oop obj = locals->obj_at(_index)();
          _value.l = JNIHandles::make_local(_calling_thread, obj);
          break;
        }
        default: ShouldNotReachHere();
      }
    }
  }
}


bool VM_GetOrSetLocal::allow_nested_vm_operations() const {
  return true; // May need to deoptimize
}


VM_GetReceiver::VM_GetReceiver(
    JavaThread* thread, JavaThread* caller_thread, jint depth)
    : VM_GetOrSetLocal(thread, caller_thread, depth, 0) {}

/////////////////////////////////////////////////////////////////////////////////////////

//
// class JvmtiSuspendControl - see comments in jvmtiImpl.hpp
//
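// These helpers sit underneath the JVMTI SuspendThread/ResumeThread (and the
// corresponding thread-list) functions. A minimal agent-side sketch
// (hypothetical jthread, errors ignored):
//
//   jvmti->SuspendThread(target);   // eventually reaches JvmtiSuspendControl::suspend()
//   jvmti->ResumeThread(target);    // eventually reaches JvmtiSuspendControl::resume()
//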

bool JvmtiSuspendControl::suspend(JavaThread *java_thread) {
  // external suspend should have caught suspending a thread twice

  // Immediate suspension required for JPDA back-end so JVMTI agent threads do
  // not deadlock due to later suspension on transitions while holding
  // raw monitors.  Passing true causes the immediate suspension.
  // java_suspend() will catch threads in the process of exiting
  // and will ignore them.
  java_thread->java_suspend();

  // It would be nice to have the following assertion in place all the time,
  // but it is possible for a racing resume request to have resumed
  // this thread right after we suspended it. Temporarily enable this
  // assertion if you are chasing a different kind of bug.
  //
  // assert(java_lang_Thread::thread(java_thread->threadObj()) == NULL ||
  //   java_thread->is_being_ext_suspended(), "thread is not suspended");

  if (java_lang_Thread::thread(java_thread->threadObj()) == NULL) {
    // check again because we can get delayed in java_suspend():
    // the thread is in the process of exiting.
    return false;
  }

  return true;
}

bool JvmtiSuspendControl::resume(JavaThread *java_thread) {
  // external suspend should have caught resuming a thread twice
  assert(java_thread->is_being_ext_suspended(), "thread should be suspended");

  // resume thread
  {
    // must always grab Threads_lock, see JVM_SuspendThread
    MutexLocker ml(Threads_lock);
    java_thread->java_resume();
  }

  return true;
}


void JvmtiSuspendControl::print() {
#ifndef PRODUCT
  MutexLocker mu(Threads_lock);
  ResourceMark rm;

  tty->print("Suspended Threads: [");
  for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
#if JVMTI_TRACE
    const char *name   = JvmtiTrace::safe_get_thread_name(thread);
#else
    const char *name   = "";
#endif /*JVMTI_TRACE */
    tty->print("%s(%c ", name, thread->is_being_ext_suspended() ? 'S' : '_');
    if (!thread->has_last_Java_frame()) {
      tty->print("no stack");
    }
    tty->print(") ");
  }
  tty->print_cr("]");
#endif
}

#ifndef KERNEL

JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_loaded_event(
    nmethod* nm) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOADED);
  event.set_compiled_method_loaded(nm);
  nmethodLocker::lock_nmethod(nm); // will be unlocked when posted
  return event;
}

JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unloaded_event(
    jmethodID id, const void* code) {
  JvmtiDeferredEvent event =
    JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOADED);
  event.set_compiled_method_unloaded(id, code);
  return event;
}

void JvmtiDeferredEvent::post() {
  switch(_type) {
    case TYPE_COMPILED_METHOD_LOADED:
      JvmtiExport::post_compiled_method_load(compiled_method_loaded());
      nmethodLocker::unlock_nmethod(compiled_method_loaded());
      break;
    case TYPE_COMPILED_METHOD_UNLOADED:
      JvmtiExport::post_compiled_method_unload(
        compiled_method_unloaded_method_id(),
        compiled_method_unloaded_code_begin());
      break;
    default:
      ShouldNotReachHere();
  }
}

JvmtiDeferredEventQueue::QueueNode* JvmtiDeferredEventQueue::_queue_tail = NULL;
JvmtiDeferredEventQueue::QueueNode* JvmtiDeferredEventQueue::_queue_head = NULL;

volatile JvmtiDeferredEventQueue::QueueNode*
    JvmtiDeferredEventQueue::_pending_list = NULL;

bool JvmtiDeferredEventQueue::has_events() {
  assert(Service_lock->owned_by_self(), "Must own Service_lock");
  process_pending_events();
  return _queue_head != NULL;
}

void JvmtiDeferredEventQueue::enqueue(const JvmtiDeferredEvent& event) {
  assert(Service_lock->owned_by_self(), "Must own Service_lock");

  process_pending_events();

  // Events get added to the end of the queue (and are pulled off the front)
  QueueNode* node = new QueueNode(event);
  if (_queue_tail == NULL) {
    _queue_tail = _queue_head = node;
  } else {
    assert(_queue_tail->next() == NULL, "Must be the last element in the list");
    _queue_tail->set_next(node);
    _queue_tail = node;
  }

  Service_lock->notify_all();
  assert((_queue_head == NULL) == (_queue_tail == NULL),
         "Inconsistent queue markers");
}

JvmtiDeferredEvent JvmtiDeferredEventQueue::dequeue() {
  assert(Service_lock->owned_by_self(), "Must own Service_lock");

  process_pending_events();

  assert(_queue_head != NULL, "Nothing to dequeue");

  if (_queue_head == NULL) {
    // Just in case this happens in product; it shouldn't, but let's not crash
    return JvmtiDeferredEvent();
  }

  QueueNode* node = _queue_head;
  _queue_head = _queue_head->next();
  if (_queue_head == NULL) {
    _queue_tail = NULL;
  }

  assert((_queue_head == NULL) == (_queue_tail == NULL),
         "Inconsistent queue markers");

  JvmtiDeferredEvent event = node->event();
  delete node;
  return event;
}

void JvmtiDeferredEventQueue::add_pending_event(
    const JvmtiDeferredEvent& event) {

  QueueNode* node = new QueueNode(event);

  while (true) {
    node->set_next((QueueNode*)_pending_list);
    QueueNode* old_value = (QueueNode*)Atomic::cmpxchg_ptr(
      (void*)node, (volatile void*)&_pending_list, node->next());
    if (old_value == node->next()) {
      break;
    }
  }
}

// This method transfers any events that were added by someone NOT holding
// the lock into the mainline queue.
void JvmtiDeferredEventQueue::process_pending_events() {
  assert(Service_lock->owned_by_self(), "Must own Service_lock");

  if (_pending_list != NULL) {
    QueueNode* head;
    while (true) {
      head = (QueueNode*)_pending_list;
      QueueNode* old_value = (QueueNode*)Atomic::cmpxchg_ptr(
        NULL, (volatile void*)&_pending_list, (void*)head);
      if (old_value == head) {
        break;
      }
    }

    assert((_queue_head == NULL) == (_queue_tail == NULL),
           "Inconsistent queue markers");

    if (head != NULL) {
      // Since we've treated the pending list as a stack (with newer
      // events at the beginning), we need to join the bottom of the stack
      // with the 'tail' of the queue in order to get the events in the
      // right order.  We do this by reversing the pending list and appending
      // it to the queue.

      QueueNode* new_tail = head;
      QueueNode* new_head = NULL;

      // This reverses the list
      QueueNode* prev = new_tail;
      QueueNode* node = new_tail->next();
      new_tail->set_next(NULL);
      while (node != NULL) {
        QueueNode* next = node->next();
        node->set_next(prev);
        prev = node;
        node = next;
      }
      new_head = prev;

      // Now append the new list to the queue
      if (_queue_tail != NULL) {
        _queue_tail->set_next(new_head);
      } else { // _queue_head == NULL
        _queue_head = new_head;
      }
      _queue_tail = new_tail;

      Service_lock->notify_all();
    }
  }
}

void JvmtiDeferredEventQueue::wait_for_empty_queue() {
  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
  while (has_events()) {
    Service_lock->notify_all();
    Service_lock->wait(Mutex::_no_safepoint_check_flag);
  }
}

void JvmtiDeferredEventQueue::notify_empty_queue() {
  assert(Service_lock->owned_by_self(), "Must own Service_lock");
  Service_lock->notify_all();
}

#endif // ndef KERNEL