/*
 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiAgentThread.hpp"
#include "prims/jvmtiEventController.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/exceptions.hpp"

//
// class JvmtiAgentThread
//
// JavaThread used to wrap a thread started by an agent
// using the JVMTI method RunAgentThread.
//
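// For reference, an agent-supplied start function has the jvmtiStartFunction
// shape, for example (illustrative signature only):
//   static void JNICALL agent_start(jvmtiEnv* jvmti, JNIEnv* jni, void* arg);
// It is invoked via call_start_function() once the new JavaThread is running.
//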

JvmtiAgentThread::JvmtiAgentThread(JvmtiEnv* env, jvmtiStartFunction start_fn, const void *start_arg)
    : JavaThread(start_function_wrapper) {
    _env = env;
    _start_fn = start_fn;
    _start_arg = start_arg;
}

void
JvmtiAgentThread::start_function_wrapper(JavaThread *thread, TRAPS) {
    // It is expected that any Agent threads will be created as
    // Java Threads.  If this is the case, notification of the creation
    // of the thread is given in JavaThread::thread_main().
    assert(thread->is_Java_thread(), "debugger thread should be a Java Thread");
    assert(thread == JavaThread::current(), "sanity check");

    JvmtiAgentThread *dthread = (JvmtiAgentThread *)thread;
    dthread->call_start_function();
}

void
JvmtiAgentThread::call_start_function() {
    ThreadToNativeFromVM transition(this);
    _start_fn(_env->jvmti_external(), jni_environment(), (void*)_start_arg);
}


//
// class GrowableCache - private methods
//

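// Rebuild the flat, NULL-terminated address cache from the element list and
// hand the new cache to the registered listener.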
void GrowableCache::recache() {
  int len = _elements->length();

  FREE_C_HEAP_ARRAY(address, _cache);
  _cache = NEW_C_HEAP_ARRAY(address,len+1, mtInternal);

  for (int i=0; i<len; i++) {
    _cache[i] = _elements->at(i)->getCacheValue();
    //
    // The cache entry has gone bad. Without a valid frame pointer
    // value, the entry is useless so we simply delete it in product
    // mode. The call to remove() will rebuild the cache again
    // without the bad entry.
    //
    if (_cache[i] == NULL) {
      assert(false, "cannot recache NULL elements");
      remove(i);
      return;
    }
  }
  _cache[len] = NULL;

  _listener_fun(_this_obj,_cache);
}

bool GrowableCache::equals(void* v, GrowableElement *e2) {
  GrowableElement *e1 = (GrowableElement *) v;
  assert(e1 != NULL, "e1 != NULL");
  assert(e2 != NULL, "e2 != NULL");

  return e1->equals(e2);
}

//
// class GrowableCache - public methods
//

GrowableCache::GrowableCache() {
  _this_obj       = NULL;
  _listener_fun   = NULL;
  _elements       = NULL;
  _cache          = NULL;
}

GrowableCache::~GrowableCache() {
  clear();
  delete _elements;
  FREE_C_HEAP_ARRAY(address, _cache);
}

void GrowableCache::initialize(void *this_obj, void listener_fun(void *, address*) ) {
  _this_obj       = this_obj;
  _listener_fun   = listener_fun;
  _elements       = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<GrowableElement*>(5, mtServiceability);
  recache();
}

// number of elements in the collection
int GrowableCache::length() {
  return _elements->length();
}

// get the value of the index element in the collection
GrowableElement* GrowableCache::at(int index) {
  GrowableElement *e = (GrowableElement *) _elements->at(index);
  assert(e != NULL, "e != NULL");
  return e;
}

int GrowableCache::find(GrowableElement* e) {
  return _elements->find(e, GrowableCache::equals);
}

// append a copy of the element to the end of the collection
void GrowableCache::append(GrowableElement* e) {
  GrowableElement *new_e = e->clone();
  _elements->append(new_e);
  recache();
}

// remove the element at index
void GrowableCache::remove (int index) {
  GrowableElement *e = _elements->at(index);
  assert(e != NULL, "e != NULL");
  _elements->remove(e);
  delete e;
  recache();
}

// clear out all elements, release all heap space and
// let our listener know that things have changed.
void GrowableCache::clear() {
  int len = _elements->length();
  for (int i=0; i<len; i++) {
    delete _elements->at(i);
  }
  _elements->clear();
  recache();
}

//
// class JvmtiBreakpoint
//

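// A breakpoint keeps a strong reference (via an OopStorage handle) to the
// holder of the method's class so that the class, and with it the Method*,
// cannot be unloaded while the breakpoint is set.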
JvmtiBreakpoint::JvmtiBreakpoint(Method* m_method, jlocation location)
    : _method(m_method), _bci((int)location), _class_holder(NULL) {
  assert(_method != NULL, "No method for breakpoint.");
  assert(_bci >= 0, "Negative bci for breakpoint.");
  oop class_holder_oop  = _method->method_holder()->klass_holder();
  _class_holder = OopStorageSet::vm_global()->allocate();
  if (_class_holder == NULL) {
    vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
                          "Cannot create breakpoint oop handle");
  }
  NativeAccess<>::oop_store(_class_holder, class_holder_oop);
}

JvmtiBreakpoint::~JvmtiBreakpoint() {
  if (_class_holder != NULL) {
    NativeAccess<>::oop_store(_class_holder, (oop)NULL);
    OopStorageSet::vm_global()->release(_class_holder);
  }
}

void JvmtiBreakpoint::copy(JvmtiBreakpoint& bp) {
  _method   = bp._method;
  _bci      = bp._bci;
  _class_holder = OopStorageSet::vm_global()->allocate();
  if (_class_holder == NULL) {
    vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
                          "Cannot create breakpoint oop handle");
  }
  oop resolved_ch = NativeAccess<>::oop_load(bp._class_holder);
  NativeAccess<>::oop_store(_class_holder, resolved_ch);
}

bool JvmtiBreakpoint::equals(JvmtiBreakpoint& bp) {
  return _method   == bp._method
    &&   _bci      == bp._bci;
}

address JvmtiBreakpoint::getBcp() const {
  return _method->bcp_from(_bci);
}

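// Apply meth_act (Method::set_breakpoint or Method::clear_breakpoint) to this
// breakpoint's method and to any running EMCP previous versions of it created
// by class redefinition, so the breakpoint stays consistent across versions.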
void JvmtiBreakpoint::each_method_version_do(method_action meth_act) {
  ((Method*)_method->*meth_act)(_bci);

  // add/remove breakpoint to/from versions of the method that are EMCP.
  Thread *thread = Thread::current();
  InstanceKlass* ik = _method->method_holder();
  Symbol* m_name = _method->name();
  Symbol* m_signature = _method->signature();

  // search previous versions if they exist
  for (InstanceKlass* pv_node = ik->previous_versions();
       pv_node != NULL;
       pv_node = pv_node->previous_versions()) {
    Array<Method*>* methods = pv_node->methods();

    for (int i = methods->length() - 1; i >= 0; i--) {
      Method* method = methods->at(i);
      // Only set breakpoints in running EMCP methods.
      if (method->is_running_emcp() &&
          method->name() == m_name &&
          method->signature() == m_signature) {
        ResourceMark rm;
        log_debug(redefine, class, breakpoint)
          ("%sing breakpoint in %s(%s)", meth_act == &Method::set_breakpoint ? "sett" : "clear",
           method->name()->as_C_string(), method->signature()->as_C_string());
        (method->*meth_act)(_bci);
        break;
      }
    }
  }
}

void JvmtiBreakpoint::set() {
  each_method_version_do(&Method::set_breakpoint);
}

void JvmtiBreakpoint::clear() {
  each_method_version_do(&Method::clear_breakpoint);
}

void JvmtiBreakpoint::print_on(outputStream* out) const {
#ifndef PRODUCT
  ResourceMark rm;
  const char *class_name  = (_method == NULL) ? "NULL" : _method->klass_name()->as_C_string();
  const char *method_name = (_method == NULL) ? "NULL" : _method->name()->as_C_string();
  out->print("Breakpoint(%s,%s,%d,%p)", class_name, method_name, _bci, getBcp());
#endif
}


//
// class VM_ChangeBreakpoints
//
// Modify the Breakpoints data structure at a safepoint
//

void VM_ChangeBreakpoints::doit() {
  switch (_operation) {
  case SET_BREAKPOINT:
    _breakpoints->set_at_safepoint(*_bp);
    break;
  case CLEAR_BREAKPOINT:
    _breakpoints->clear_at_safepoint(*_bp);
    break;
  default:
    assert(false, "Unknown operation");
  }
}

//
// class JvmtiBreakpoints
//
// a JVMTI internal collection of JvmtiBreakpoint
//

JvmtiBreakpoints::JvmtiBreakpoints(void listener_fun(void *,address *)) {
  _bps.initialize(this,listener_fun);
}

JvmtiBreakpoints::~JvmtiBreakpoints() {}

void JvmtiBreakpoints::print() {
#ifndef PRODUCT
  LogTarget(Trace, jvmti) log;
  LogStream log_stream(log);

  int n = _bps.length();
  for (int i=0; i<n; i++) {
    JvmtiBreakpoint& bp = _bps.at(i);
    log_stream.print("%d: ", i);
    bp.print_on(&log_stream);
    log_stream.cr();
  }
#endif
}


void JvmtiBreakpoints::set_at_safepoint(JvmtiBreakpoint& bp) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  int i = _bps.find(bp);
  if (i == -1) {
    _bps.append(bp);
    bp.set();
  }
}

void JvmtiBreakpoints::clear_at_safepoint(JvmtiBreakpoint& bp) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  int i = _bps.find(bp);
  if (i != -1) {
    _bps.remove(i);
    bp.clear();
  }
}

int JvmtiBreakpoints::length() { return _bps.length(); }

int JvmtiBreakpoints::set(JvmtiBreakpoint& bp) {
  if ( _bps.find(bp) != -1) {
     return JVMTI_ERROR_DUPLICATE;
  }
  VM_ChangeBreakpoints set_breakpoint(VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
  VMThread::execute(&set_breakpoint);
  return JVMTI_ERROR_NONE;
}

int JvmtiBreakpoints::clear(JvmtiBreakpoint& bp) {
  if ( _bps.find(bp) == -1) {
     return JVMTI_ERROR_NOT_FOUND;
  }

  VM_ChangeBreakpoints clear_breakpoint(VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
  VMThread::execute(&clear_breakpoint);
  return JVMTI_ERROR_NONE;
}

void JvmtiBreakpoints::clearall_in_class_at_safepoint(Klass* klass) {
  bool changed = true;
  // We are going to run through the list of breakpoints
  // and delete some.  This deletion probably alters
  // the list in some implementation-defined way such
  // that when we delete entry i, the next entry might
  // no longer be at i+1.  To be safe, each time we delete
  // an entry, we just start again from the beginning.
  // We stop when we make a pass through the whole list
  // without deleting anything.
  while (changed) {
    int len = _bps.length();
    changed = false;
    for (int i = 0; i < len; i++) {
      JvmtiBreakpoint& bp = _bps.at(i);
      if (bp.method()->method_holder() == klass) {
        bp.clear();
        _bps.remove(i);
        // This changed 'i' so we have to start over.
        changed = true;
        break;
      }
    }
  }
}

//
// class JvmtiCurrentBreakpoints
//

JvmtiBreakpoints *JvmtiCurrentBreakpoints::_jvmti_breakpoints  = NULL;
address *         JvmtiCurrentBreakpoints::_breakpoint_list    = NULL;


JvmtiBreakpoints& JvmtiCurrentBreakpoints::get_jvmti_breakpoints() {
  if (_jvmti_breakpoints != NULL) return (*_jvmti_breakpoints);
  _jvmti_breakpoints = new JvmtiBreakpoints(listener_fun);
  assert(_jvmti_breakpoints != NULL, "_jvmti_breakpoints != NULL");
  return (*_jvmti_breakpoints);
}

void  JvmtiCurrentBreakpoints::listener_fun(void *this_obj, address *cache) {
  JvmtiBreakpoints *this_jvmti = (JvmtiBreakpoints *) this_obj;
  assert(this_jvmti != NULL, "this_jvmti != NULL");

  debug_only(int n = this_jvmti->length(););
  assert(cache[n] == NULL, "cache must be NULL terminated");

  set_breakpoint_list(cache);
}

///////////////////////////////////////////////////////////////
//
// class VM_GetOrSetLocal
//

// Constructor for non-object getter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, BasicType type)
  : _thread(thread)
  , _calling_thread(NULL)
  , _depth(depth)
  , _index(index)
  , _type(type)
  , _jvf(NULL)
  , _set(false)
  , _eb(NULL, NULL, false) // no references escape
  , _result(JVMTI_ERROR_NONE)
{
}

// Constructor for object or non-object setter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, BasicType type, jvalue value)
  : _thread(thread)
  , _calling_thread(NULL)
  , _depth(depth)
  , _index(index)
  , _type(type)
  , _value(value)
  , _jvf(NULL)
  , _set(true)
  , _eb(JavaThread::current(), thread, type == T_OBJECT)
  , _result(JVMTI_ERROR_NONE)
{
}

// Constructor for object getter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, JavaThread* calling_thread, jint depth, int index)
  : _thread(thread)
  , _calling_thread(calling_thread)
  , _depth(depth)
  , _index(index)
  , _type(T_OBJECT)
  , _jvf(NULL)
  , _set(false)
  , _eb(calling_thread, thread, true)
  , _result(JVMTI_ERROR_NONE)
{
}

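// Walk the target thread's stack to the vframe at the requested depth, or
// return NULL if the thread has no Java frames or the stack is shallower.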
vframe *VM_GetOrSetLocal::get_vframe() {
  if (!_thread->has_last_Java_frame()) {
    return NULL;
  }
  RegisterMap reg_map(_thread);
  vframe *vf = _thread->last_java_vframe(&reg_map);
  int d = 0;
  while ((vf != NULL) && (d < _depth)) {
    vf = vf->java_sender();
    d++;
  }
  return vf;
}

javaVFrame *VM_GetOrSetLocal::get_java_vframe() {
  vframe* vf = get_vframe();
  if (vf == NULL) {
    _result = JVMTI_ERROR_NO_MORE_FRAMES;
    return NULL;
  }
  javaVFrame *jvf = (javaVFrame*)vf;

  if (!vf->is_java_frame()) {
    _result = JVMTI_ERROR_OPAQUE_FRAME;
    return NULL;
  }
  return jvf;
}

// Check that the klass is assignable to a type with the given signature.
// Another solution could be to use the function Klass::is_subtype_of(type).
// But the type class can be forced to load/initialize eagerly in such a case.
// This may cause unexpected consequences like CFLH or class-init JVMTI events.
// It is better to avoid such a behavior.
bool VM_GetOrSetLocal::is_assignable(const char* ty_sign, Klass* klass, Thread* thread) {
  assert(ty_sign != NULL, "type signature must not be NULL");
  assert(thread != NULL, "thread must not be NULL");
  assert(klass != NULL, "klass must not be NULL");

  int len = (int) strlen(ty_sign);
  if (ty_sign[0] == JVM_SIGNATURE_CLASS &&
      ty_sign[len-1] == JVM_SIGNATURE_ENDCLASS) { // Need pure class/interface name
    ty_sign++;
    len -= 2;
  }
  TempNewSymbol ty_sym = SymbolTable::new_symbol(ty_sign, len);
  if (klass->name() == ty_sym) {
    return true;
  }
  // Compare primary supers
  int super_depth = klass->super_depth();
  int idx;
  for (idx = 0; idx < super_depth; idx++) {
    if (klass->primary_super_of_depth(idx)->name() == ty_sym) {
      return true;
    }
  }
  // Compare secondary supers
  const Array<Klass*>* sec_supers = klass->secondary_supers();
  for (idx = 0; idx < sec_supers->length(); idx++) {
    if (((Klass*) sec_supers->at(idx))->name() == ty_sym) {
      return true;
    }
  }
  return false;
}

// Checks error conditions:
//   JVMTI_ERROR_INVALID_SLOT
//   JVMTI_ERROR_TYPE_MISMATCH
// Returns: 'true' - everything is Ok, 'false' - error, with the error code stored in _result

bool VM_GetOrSetLocal::check_slot_type_lvt(javaVFrame* jvf) {
  Method* method_oop = jvf->method();
  jint num_entries = method_oop->localvariable_table_length();
  if (num_entries == 0) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;       // There are no slots
  }
  int signature_idx = -1;
  int vf_bci = jvf->bci();
  LocalVariableTableElement* table = method_oop->localvariable_table_start();
  for (int i = 0; i < num_entries; i++) {
    int start_bci = table[i].start_bci;
    int end_bci = start_bci + table[i].length;

    // Here we assume that locations of LVT entries
    // with the same slot number cannot overlap
    if (_index == (jint) table[i].slot && start_bci <= vf_bci && vf_bci <= end_bci) {
      signature_idx = (int) table[i].descriptor_cp_index;
      break;
    }
  }
  if (signature_idx == -1) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;       // Incorrect slot index
  }
  Symbol*   sign_sym  = method_oop->constants()->symbol_at(signature_idx);
  BasicType slot_type = Signature::basic_type(sign_sym);

  switch (slot_type) {
  case T_BYTE:
  case T_SHORT:
  case T_CHAR:
  case T_BOOLEAN:
    slot_type = T_INT;
    break;
  case T_ARRAY:
    slot_type = T_OBJECT;
    break;
  default:
    break;
  };
  if (_type != slot_type) {
    _result = JVMTI_ERROR_TYPE_MISMATCH;
    return false;
  }

  jobject jobj = _value.l;
  if (_set && slot_type == T_OBJECT && jobj != NULL) { // NULL reference is allowed
    // Check that the jobject class matches the return type signature.
    JavaThread* cur_thread = JavaThread::current();
    HandleMark hm(cur_thread);

    Handle obj(cur_thread, JNIHandles::resolve_external_guard(jobj));
    NULL_CHECK(obj, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
    Klass* ob_k = obj->klass();
    NULL_CHECK(ob_k, (_result = JVMTI_ERROR_INVALID_OBJECT, false));

    const char* signature = (const char *) sign_sym->as_utf8();
    if (!is_assignable(signature, ob_k, cur_thread)) {
      _result = JVMTI_ERROR_TYPE_MISMATCH;
      return false;
    }
  }
  return true;
}

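// Validate the slot index and type when the method has no LocalVariableTable:
// check the index against max_locals and compare the requested type with the
// type observed in the frame's StackValueCollection for that slot.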
bool VM_GetOrSetLocal::check_slot_type_no_lvt(javaVFrame* jvf) {
  Method* method_oop = jvf->method();
  jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0;

  if (_index < 0 || _index + extra_slot >= method_oop->max_locals()) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;
  }
  StackValueCollection *locals = _jvf->locals();
  BasicType slot_type = locals->at(_index)->type();

  if (slot_type == T_CONFLICT) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;
  }
  if (extra_slot) {
    BasicType extra_slot_type = locals->at(_index + 1)->type();
    if (extra_slot_type != T_INT) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return false;
    }
  }
  if (_type != slot_type && (_type == T_OBJECT || slot_type != T_INT)) {
    _result = JVMTI_ERROR_TYPE_MISMATCH;
    return false;
  }
  return true;
}

static bool can_be_deoptimized(vframe* vf) {
  return (vf->is_compiled_frame() && vf->fr().can_be_deoptimized());
}

// Revert optimizations based on escape analysis if this is an access to a local object
bool VM_GetOrSetLocal::deoptimize_objects(javaVFrame* jvf) {
#if COMPILER2_OR_JVMCI
  if (NOT_JVMCI(DoEscapeAnalysis &&) _type == T_OBJECT) {
    if (_depth < _thread->frames_to_pop_failed_realloc()) {
      // cannot access frame with failed reallocations
      _result = JVMTI_ERROR_OUT_OF_MEMORY;
      return false;
    }
    if (can_be_deoptimized(jvf)) {
      compiledVFrame* cf = compiledVFrame::cast(jvf);
      if (cf->not_global_escape_in_scope() && !_eb.deoptimize_objects(cf->fr().id())) {
        // reallocation of scalar replaced objects failed, because heap is exhausted
        _result = JVMTI_ERROR_OUT_OF_MEMORY;
        return false;
      }
    }

    // With this access the object could escape the thread, changing its escape
    // state from ArgEscape to GlobalEscape, so we must deoptimize callers which
    // could have optimized on the escape state.
    vframe* vf = jvf;
    do {
      // move to next physical frame
      while(!vf->is_top()) {
        vf = vf->sender();
      }
      vf = vf->sender();

      if (vf != NULL && vf->is_compiled_frame()) {
        compiledVFrame* cvf = compiledVFrame::cast(vf);
        // Deoptimize objects if arg escape is being passed down the stack.
        // Note that deoptimizing the frame is not enough, because objects need to be relocked
        if (cvf->arg_escape() && !_eb.deoptimize_objects(cvf->fr().id())) {
          // reallocation of scalar replaced objects failed, because heap is exhausted
          _result = JVMTI_ERROR_OUT_OF_MEMORY;
          return false;
        }
      }
    } while(vf != NULL && !vf->is_entry_frame());
  }
#endif // COMPILER2_OR_JVMCI
  return true;
}

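// Runs in the requesting thread before the safepoint: locate the target frame,
// validate the slot and type, and deoptimize escape-analysis dependent frames
// if an object local is about to be exposed.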
bool VM_GetOrSetLocal::doit_prologue() {
  _jvf = get_java_vframe();
  NULL_CHECK(_jvf, false);

  Method* method_oop = _jvf->method();
  if (getting_receiver()) {
    if (method_oop->is_static()) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return false;
    }
    return true;
  }

  if (method_oop->is_native()) {
    _result = JVMTI_ERROR_OPAQUE_FRAME;
    return false;
  }

  if (!check_slot_type_no_lvt(_jvf)) {
    return false;
  }
  if (method_oop->has_localvariable_table() && !check_slot_type_lvt(_jvf)) {
    return false;
  }

  if (!deoptimize_objects(_jvf)) {
    return false;
  }

  return true;
}

void VM_GetOrSetLocal::doit() {
  InterpreterOopMap oop_mask;
  _jvf->method()->mask_for(_jvf->bci(), &oop_mask);
  if (oop_mask.is_dead(_index)) {
    // The local can be invalid and uninitialized in the scope of the current bci
    _result = JVMTI_ERROR_INVALID_SLOT;
    return;
  }
  if (_set) {
    // Force deoptimization of frame if compiled because it's
    // possible the compiler emitted some locals as constant values,
    // meaning they are not mutable.
    if (can_be_deoptimized(_jvf)) {

      // Schedule deoptimization so that eventually the local
      // update will be written to an interpreter frame.
      Deoptimization::deoptimize_frame(_jvf->thread(), _jvf->fr().id());

      // Now store a new value for the local which will be applied
      // once deoptimization occurs. Note however that while this
      // write is deferred until deoptimization actually happens,
      // any vframe created after this point will have its locals
      // reflect this update, so as far as anyone can see the
      // write has already taken place.

      // If we are updating an oop then get the oop from the handle
      // since the handle will be long gone by the time the deopt
      // happens. The oop stored in the deferred local will be
      // gc'd on its own.
      if (_type == T_OBJECT) {
        _value.l = cast_from_oop<jobject>(JNIHandles::resolve_external_guard(_value.l));
      }
      // Re-read the vframe so we can see that it is deoptimized
      // [ Only needed because of the assert in update_local() ]
      _jvf = get_java_vframe();
      ((compiledVFrame*)_jvf)->update_local(_type, _index, _value);
      return;
    }
    StackValueCollection *locals = _jvf->locals();
    HandleMark hm;

    switch (_type) {
      case T_INT:    locals->set_int_at   (_index, _value.i); break;
      case T_LONG:   locals->set_long_at  (_index, _value.j); break;
      case T_FLOAT:  locals->set_float_at (_index, _value.f); break;
      case T_DOUBLE: locals->set_double_at(_index, _value.d); break;
      case T_OBJECT: {
        Handle ob_h(Thread::current(), JNIHandles::resolve_external_guard(_value.l));
        locals->set_obj_at (_index, ob_h);
        break;
      }
      default: ShouldNotReachHere();
    }
    _jvf->set_locals(locals);
  } else {
    if (_jvf->method()->is_native() && _jvf->is_compiled_frame()) {
      assert(getting_receiver(), "Can only get here when getting receiver");
      oop receiver = _jvf->fr().get_native_receiver();
      _value.l = JNIHandles::make_local(_calling_thread, receiver);
    } else {
      StackValueCollection *locals = _jvf->locals();

      switch (_type) {
        case T_INT:    _value.i = locals->int_at   (_index);   break;
        case T_LONG:   _value.j = locals->long_at  (_index);   break;
        case T_FLOAT:  _value.f = locals->float_at (_index);   break;
        case T_DOUBLE: _value.d = locals->double_at(_index);   break;
        case T_OBJECT: {
          // Wrap the oop to be returned in a local JNI handle since
          // oops_do() no longer applies after doit() is finished.
          oop obj = locals->obj_at(_index)();
          _value.l = JNIHandles::make_local(_calling_thread, obj);
          break;
        }
        default: ShouldNotReachHere();
      }
    }
  }
}


bool VM_GetOrSetLocal::allow_nested_vm_operations() const {
  return true; // May need to deoptimize
}


VM_GetReceiver::VM_GetReceiver(
    JavaThread* thread, JavaThread* caller_thread, jint depth)
    : VM_GetOrSetLocal(thread, caller_thread, depth, 0) {}

/////////////////////////////////////////////////////////////////////////////////////////

//
// class JvmtiSuspendControl - see comments in jvmtiImpl.hpp
//

bool JvmtiSuspendControl::suspend(JavaThread *java_thread) {
  // external suspend should have caught suspending a thread twice

  // Immediate suspension required for JPDA back-end so JVMTI agent threads do
  // not deadlock due to later suspension on transitions while holding
  // raw monitors.  Passing true causes the immediate suspension.
  // java_suspend() will catch threads in the process of exiting
  // and will ignore them.
  java_thread->java_suspend();

  // It would be nice to have the following assertion in all the time,
  // but it is possible for a racing resume request to have resumed
  // this thread right after we suspended it. Temporarily enable this
  // assertion if you are chasing a different kind of bug.
  //
  // assert(java_lang_Thread::thread(java_thread->threadObj()) == NULL ||
  //   java_thread->is_being_ext_suspended(), "thread is not suspended");

  if (java_lang_Thread::thread(java_thread->threadObj()) == NULL) {
    // check again because we can get delayed in java_suspend():
    // the thread is in the process of exiting.
    return false;
  }

  return true;
}

bool JvmtiSuspendControl::resume(JavaThread *java_thread) {
  // external suspend should have caught resuming a thread twice
  assert(java_thread->is_being_ext_suspended(), "thread should be suspended");

  // resume thread
  {
    // must always grab Threads_lock, see JVM_SuspendThread
    MutexLocker ml(Threads_lock);
    java_thread->java_resume();
  }

  return true;
}


void JvmtiSuspendControl::print() {
#ifndef PRODUCT
  ResourceMark rm;
  LogStreamHandle(Trace, jvmti) log_stream;
  log_stream.print("Suspended Threads: [");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
#ifdef JVMTI_TRACE
    const char *name   = JvmtiTrace::safe_get_thread_name(thread);
#else
    const char *name   = "";
#endif /*JVMTI_TRACE */
    log_stream.print("%s(%c ", name, thread->is_being_ext_suspended() ? 'S' : '_');
    if (!thread->has_last_Java_frame()) {
      log_stream.print("no stack");
    }
    log_stream.print(") ");
  }
  log_stream.print_cr("]");
#endif
}

JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event(
    nmethod* nm) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
  event._event_data.compiled_method_load = nm;
  return event;
}

JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
    jmethodID id, const void* code) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
  event._event_data.compiled_method_unload.method_id = id;
  event._event_data.compiled_method_unload.code_begin = code;
  return event;
}

JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
      const char* name, const void* code_begin, const void* code_end) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED);
  // Need to make a copy of the name since we don't know how long
  // the event poster will keep it around after we enqueue the
  // deferred event and return. strdup() failure is handled in
  // the post() routine below.
  event._event_data.dynamic_code_generated.name = os::strdup(name);
  event._event_data.dynamic_code_generated.code_begin = code_begin;
  event._event_data.dynamic_code_generated.code_end = code_end;
  return event;
}

JvmtiDeferredEvent JvmtiDeferredEvent::class_unload_event(const char* name) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_CLASS_UNLOAD);
  // Need to make a copy of the name since we don't know how long
  // the event poster will keep it around after we enqueue the
  // deferred event and return. strdup() failure is handled in
  // the post() routine below.
  event._event_data.class_unload.name = os::strdup(name);
  return event;
}

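// Post this deferred event from the service thread, then release any name
// string that was copied with os::strdup() when the event was created.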
void JvmtiDeferredEvent::post() {
  assert(Thread::current()->is_service_thread(),
         "Service thread must post enqueued events");
  switch(_type) {
    case TYPE_COMPILED_METHOD_LOAD: {
      nmethod* nm = _event_data.compiled_method_load;
      JvmtiExport::post_compiled_method_load(nm);
      break;
    }
    case TYPE_COMPILED_METHOD_UNLOAD: {
      JvmtiExport::post_compiled_method_unload(
        _event_data.compiled_method_unload.method_id,
        _event_data.compiled_method_unload.code_begin);
      break;
    }
    case TYPE_DYNAMIC_CODE_GENERATED: {
      JvmtiExport::post_dynamic_code_generated_internal(
        // if strdup failed give the event a default name
        (_event_data.dynamic_code_generated.name == NULL)
          ? "unknown_code" : _event_data.dynamic_code_generated.name,
        _event_data.dynamic_code_generated.code_begin,
        _event_data.dynamic_code_generated.code_end);
      if (_event_data.dynamic_code_generated.name != NULL) {
        // release our copy
        os::free((void *)_event_data.dynamic_code_generated.name);
      }
      break;
    }
    case TYPE_CLASS_UNLOAD: {
      JvmtiExport::post_class_unload_internal(
        // if strdup failed give the event a default name
        (_event_data.class_unload.name == NULL)
          ? "unknown_class" : _event_data.class_unload.name);
      if (_event_data.class_unload.name != NULL) {
        // release our copy
        os::free((void *)_event_data.class_unload.name);
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void JvmtiDeferredEvent::post_compiled_method_load_event(JvmtiEnv* env) {
  assert(_type == TYPE_COMPILED_METHOD_LOAD, "only user of this method");
  nmethod* nm = _event_data.compiled_method_load;
  JvmtiExport::post_compiled_method_load(env, nm);
}

void JvmtiDeferredEvent::run_nmethod_entry_barriers() {
  if (_type == TYPE_COMPILED_METHOD_LOAD) {
    _event_data.compiled_method_load->run_nmethod_entry_barrier();
  }
}


// Keep the nmethod for compiled_method_load from being unloaded.
void JvmtiDeferredEvent::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  if (cf != NULL && _type == TYPE_COMPILED_METHOD_LOAD) {
    cf->do_code_blob(_event_data.compiled_method_load);
  }
}

// The sweeper calls this and marks the nmethods here on the stack so that
// they cannot be turned into zombies while in the queue.
void JvmtiDeferredEvent::nmethods_do(CodeBlobClosure* cf) {
  if (cf != NULL && _type == TYPE_COMPILED_METHOD_LOAD) {
    cf->do_code_blob(_event_data.compiled_method_load);
  }
}


bool JvmtiDeferredEventQueue::has_events() {
  // We save the queued events before the live phase and post them when it starts.
  // This code could skip saving the events on the queue before the live
  // phase and ignore them, but this would change how we do things now.
  // Starting the service thread earlier causes this to be called before the live phase begins.
  // The events on the queue should all be posted after the live phase so this is an
  // ok check.  Before the live phase, DynamicCodeGenerated events are posted directly.
  // If we add other types of events to the deferred queue, this could get ugly.
  return JvmtiEnvBase::get_phase() == JVMTI_PHASE_LIVE  && _queue_head != NULL;
}

void JvmtiDeferredEventQueue::enqueue(JvmtiDeferredEvent event) {
  // Events get added to the end of the queue (and are pulled off the front).
  QueueNode* node = new QueueNode(event);
  if (_queue_tail == NULL) {
    _queue_tail = _queue_head = node;
  } else {
    assert(_queue_tail->next() == NULL, "Must be the last element in the list");
    _queue_tail->set_next(node);
    _queue_tail = node;
  }

  assert((_queue_head == NULL) == (_queue_tail == NULL),
         "Inconsistent queue markers");
}

JvmtiDeferredEvent JvmtiDeferredEventQueue::dequeue() {
  assert(_queue_head != NULL, "Nothing to dequeue");

  if (_queue_head == NULL) {
    // Just in case this happens in product; it shouldn't but let's not crash
    return JvmtiDeferredEvent();
  }

  QueueNode* node = _queue_head;
  _queue_head = _queue_head->next();
  if (_queue_head == NULL) {
    _queue_tail = NULL;
  }

  assert((_queue_head == NULL) == (_queue_tail == NULL),
         "Inconsistent queue markers");

  JvmtiDeferredEvent event = node->event();
  delete node;
  return event;
}

void JvmtiDeferredEventQueue::post(JvmtiEnv* env) {
  // Post and destroy queue nodes
  while (_queue_head != NULL) {
     JvmtiDeferredEvent event = dequeue();
     event.post_compiled_method_load_event(env);
  }
}

void JvmtiDeferredEventQueue::run_nmethod_entry_barriers() {
  for(QueueNode* node = _queue_head; node != NULL; node = node->next()) {
     node->event().run_nmethod_entry_barriers();
  }
}


void JvmtiDeferredEventQueue::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  for(QueueNode* node = _queue_head; node != NULL; node = node->next()) {
     node->event().oops_do(f, cf);
  }
}

void JvmtiDeferredEventQueue::nmethods_do(CodeBlobClosure* cf) {
  for(QueueNode* node = _queue_head; node != NULL; node = node->next()) {
     node->event().nmethods_do(cf);
  }
}