src/share/vm/oops/method.hpp

Old:

 348   // is needed for proper retries. See, for example,
 349   // InterpreterRuntime::exception_handler_for_exception.
 350   static int fast_exception_handler_bci_for(methodHandle mh, KlassHandle ex_klass, int throw_bci, TRAPS);
 351 
 352   // method data access
 353   MethodData* method_data() const              {
 354     return _method_data;
 355   }
 356 
 357   void set_method_data(MethodData* data)       {
 358     // The store into method must be released. On platforms without
 359     // total store order (TSO) the reference may become visible before
 360     // the initialization of data otherwise.
 361     OrderAccess::release_store_ptr((volatile void *)&_method_data, data);
 362   }
 363 
 364   MethodCounters* method_counters() const {
 365     return _method_counters;
 366   }
 367 
 368   void set_method_counters(MethodCounters* counters) {
 369     // The store into method must be released. On platforms without
 370     // total store order (TSO) the reference may become visible before
 371     // the initialization of data otherwise.
 372     OrderAccess::release_store_ptr((volatile void *)&_method_counters, counters);
 373   }
 374 
 375 #ifdef TIERED
 376   // We are reusing interpreter_invocation_count as a holder for the previous event count!
 377   // We can do that since interpreter_invocation_count is not used in tiered.
 378   int prev_event_count() const                   {
 379     if (method_counters() == NULL) {
 380       return 0;
 381     } else {
 382       return method_counters()->interpreter_invocation_count();
 383     }
 384   }
 385   void set_prev_event_count(int count) {
 386     MethodCounters* mcs = method_counters();
 387     if (mcs != NULL) {
 388       mcs->set_interpreter_invocation_count(count);
 389     }
 390   }
 391   jlong prev_time() const                        {
 392     MethodCounters* mcs = method_counters();


New:

 348   // is needed for proper retries. See, for example,
 349   // InterpreterRuntime::exception_handler_for_exception.
 350   static int fast_exception_handler_bci_for(methodHandle mh, KlassHandle ex_klass, int throw_bci, TRAPS);
 351 
 352   // method data access
 353   MethodData* method_data() const              {
 354     return _method_data;
 355   }
 356 
 357   void set_method_data(MethodData* data)       {
 358     // The store into method must be released. On platforms without
 359     // total store order (TSO) the reference may become visible before
 360     // the initialization of data otherwise.
 361     OrderAccess::release_store_ptr((volatile void *)&_method_data, data);
 362   }
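
The comment above is about safe publication: the MethodData object must be fully initialized before its address becomes visible through _method_data, so a reader that sees a non-NULL pointer never observes a half-built object. Below is a minimal stand-alone sketch of that publication pattern, written with standard C++11 atomics instead of HotSpot's OrderAccess; Payload, publish and consume are hypothetical names used only for illustration.

    #include <atomic>
    #include <cstdio>

    // Hypothetical stand-in for MethodData; not HotSpot code.
    struct Payload {
      int counters[4];
    };

    std::atomic<Payload*> g_payload{nullptr};   // plays the role of _method_data

    void publish() {
      Payload* p = new Payload();
      p->counters[0] = 42;                      // initialize the object first...
      // ...then publish the pointer with release semantics, mirroring
      // OrderAccess::release_store_ptr: the initialization above cannot be
      // reordered after this store.
      g_payload.store(p, std::memory_order_release);
    }

    void consume() {
      // The matching acquire load guarantees that once a non-null pointer is
      // seen, the fully initialized payload is seen as well.
      Payload* p = g_payload.load(std::memory_order_acquire);
      if (p != nullptr) {
        std::printf("%d\n", p->counters[0]);
      }
    }

    int main() {
      publish();
      consume();
      return 0;
    }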
 363 
 364   MethodCounters* method_counters() const {
 365     return _method_counters;
 366   }
 367 
 368   void clear_method_counters() {
 369     _method_counters = NULL;
 370   }
 371 
 372   bool init_method_counters(MethodCounters* counters) {
 373     // Try to install a pointer to MethodCounters, return true on success.
 374     return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL;
 375   }
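
The new init_method_counters() replaces the plain release-store setter with a compare-and-swap against NULL, so several threads may race to create counters but only one pointer is ever installed; the boolean result tells the caller whether its allocation won. Below is a minimal stand-alone sketch of that allocate-then-CAS-install idiom, again using standard C++11 atomics rather than Atomic::cmpxchg_ptr; Counters, init_counters and get_or_create_counters are hypothetical, and a real caller would allocate and free the losing copy in metaspace, not with new/delete.

    #include <atomic>

    // Hypothetical stand-in for MethodCounters; not HotSpot code.
    struct Counters { int invocation_count = 0; };

    std::atomic<Counters*> g_counters{nullptr};   // plays the role of _method_counters

    // Mirrors init_method_counters(): install 'c' only if the slot is still
    // empty, and report whether this thread's pointer won the race.
    bool init_counters(Counters* c) {
      Counters* expected = nullptr;
      return g_counters.compare_exchange_strong(expected, c);
    }

    // Typical caller: allocate eagerly, let the CAS decide whose allocation
    // becomes visible, and have the loser discard its copy.
    Counters* get_or_create_counters() {
      Counters* existing = g_counters.load(std::memory_order_acquire);
      if (existing != nullptr) {
        return existing;
      }
      Counters* fresh = new Counters();
      if (!init_counters(fresh)) {
        delete fresh;                             // another thread won the race
      }
      return g_counters.load(std::memory_order_acquire);
    }

    int main() {
      (void)get_or_create_counters();
      return 0;
    }

The losing thread simply discards its copy and reloads the winner's pointer, which is why the function returns bool rather than void: the caller always ends up using whichever MethodCounters object was actually installed.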
 376 
 377 #ifdef TIERED
 378   // We are reusing interpreter_invocation_count as a holder for the previous event count!
 379   // We can do that since interpreter_invocation_count is not used in tiered.
 380   int prev_event_count() const                   {
 381     if (method_counters() == NULL) {
 382       return 0;
 383     } else {
 384       return method_counters()->interpreter_invocation_count();
 385     }
 386   }
 387   void set_prev_event_count(int count) {
 388     MethodCounters* mcs = method_counters();
 389     if (mcs != NULL) {
 390       mcs->set_interpreter_invocation_count(count);
 391     }
 392   }
 393   jlong prev_time() const                        {
 394     MethodCounters* mcs = method_counters();

