src/share/vm/prims/jvmtiImpl.hpp
webrev navigation (hotspot-npg, Sdiff of src/share/vm/prims): Index | Unified diffs | Context diffs | Sdiffs | Wdiffs | Patch | New | Old | Previous File | Next File

src/share/vm/prims/jvmtiImpl.hpp

Print this page




     // Tail of the per-event payload (the opening of this union/struct is
     // above this excerpt).  Payload for a compiled-method-unload event:
 464     struct {
 465       nmethod* nm;
 466       jmethodID method_id;
 467       const void* code_begin;
 468     } compiled_method_unload;
     // Payload for a dynamic-code-generated event: blob name and code bounds.
 469     struct {
 470       const char* name;
 471       const void* code_begin;
 472       const void* code_end;
 473     } dynamic_code_generated;
 474   } _event_data;
 475 
     // Non-public: instances are obtained through the factory methods below.
 476   JvmtiDeferredEvent(Type t) : _type(t) {}
 477 
 478  public:
 479 
     // A default-constructed event has type TYPE_NONE and carries no payload.
 480   JvmtiDeferredEvent() : _type(TYPE_NONE) {}
 481 
 482   // Factory methods
     // NOTE(review): KERNEL_RETURN_/KERNEL_RETURN presumably expand to stub
     // bodies in Kernel VM builds and to plain declarations otherwise --
     // confirm against utilities/macros.hpp.
 483   static JvmtiDeferredEvent compiled_method_load_event(nmethod* nm)
 484     KERNEL_RETURN_(JvmtiDeferredEvent());
 485   static JvmtiDeferredEvent compiled_method_unload_event(nmethod* nm,
 486       jmethodID id, const void* code) KERNEL_RETURN_(JvmtiDeferredEvent());
 487   static JvmtiDeferredEvent dynamic_code_generated_event(
 488       const char* name, const void* begin, const void* end)
 489           KERNEL_RETURN_(JvmtiDeferredEvent());
 490 
 491   // Actually posts the event.
 492   void post() KERNEL_RETURN;
 493 };
 494 
 495 /**
 496  * Events enqueued on this queue wake up the Service thread which dequeues
 497  * and posts the events.  The Service_lock is required to be held
 498  * when operating on the queue (except for the "pending" events).
 499  */
 500 class JvmtiDeferredEventQueue : AllStatic {
 501   friend class JvmtiDeferredEvent;
 502  private:
       // Singly-linked list node, allocated on the C heap (mtInternal),
       // holding one JvmtiDeferredEvent by value.
 503   class QueueNode : public CHeapObj<mtInternal> {
 504    private:
 505     JvmtiDeferredEvent _event;
 506     QueueNode* _next;
 507 
 508    public:
 509     QueueNode(const JvmtiDeferredEvent& event)
 510       : _event(event), _next(NULL) {}
 511 
 512     const JvmtiDeferredEvent& event() const { return _event; }
 513     QueueNode* next() const { return _next; }
 514 
 515     void set_next(QueueNode* next) { _next = next; }
 516   };
 517 
 518   static QueueNode* _queue_head;             // Hold Service_lock to access
 519   static QueueNode* _queue_tail;             // Hold Service_lock to access
       // NOTE(review): 'volatile QueueNode*' declares a pointer to volatile
       // QueueNode, not a volatile pointer ('QueueNode* volatile') -- confirm
       // the intended qualifier placement for this CAS-updated list head.
 520   static volatile QueueNode* _pending_list;  // Uses CAS for read/update
 521 
 522   // Transfers events from the _pending_list to the _queue.
 523   static void process_pending_events() KERNEL_RETURN;
 524 
 525  public:
 526   // Must be holding Service_lock when calling these
 527   static bool has_events() KERNEL_RETURN_(false);
 528   static void enqueue(const JvmtiDeferredEvent& event) KERNEL_RETURN;
 529   static JvmtiDeferredEvent dequeue() KERNEL_RETURN_(JvmtiDeferredEvent());
 530 
 531   // Used to enqueue events without using a lock, for times (such as during
 532   // safepoint) when we can't or don't want to lock the Service_lock.
 533   //
 534   // Events will be held off to the side until there's a call to
 535   // dequeue(), enqueue(), or process_pending_events() (all of which require
 536   // the holding of the Service_lock), and will be enqueued at that time.
 537   static void add_pending_event(const JvmtiDeferredEvent&) KERNEL_RETURN;
 538 };
 539 
 540 // Utility macro that checks for NULL pointers:
     // NOTE(review): expanding to a bare 'if' statement makes this macro
     // unsafe directly before an 'else'; the conventional do { ... } while (0)
     // wrapper would avoid that -- left unchanged here.
 541 #define NULL_CHECK(X, Y) if ((X) == NULL) { return (Y); }
 542 
 543 #endif // SHARE_VM_PRIMS_JVMTIIMPL_HPP


     // Tail of the per-event payload (the opening of this union/struct is
     // above this excerpt).  Payload for a compiled-method-unload event:
 464     struct {
 465       nmethod* nm;
 466       jmethodID method_id;
 467       const void* code_begin;
 468     } compiled_method_unload;
     // Payload for a dynamic-code-generated event: blob name and code bounds.
 469     struct {
 470       const char* name;
 471       const void* code_begin;
 472       const void* code_end;
 473     } dynamic_code_generated;
 474   } _event_data;
 475 
     // Non-public: instances are obtained through the factory methods below.
 476   JvmtiDeferredEvent(Type t) : _type(t) {}
 477 
 478  public:
 479 
     // A default-constructed event has type TYPE_NONE and carries no payload.
 480   JvmtiDeferredEvent() : _type(TYPE_NONE) {}
 481 
 482   // Factory methods
     // NOTE(review): NOT_JVMTI_RETURN_/NOT_JVMTI_RETURN presumably expand to
     // stub bodies when JVMTI support is excluded (INCLUDE_JVMTI == 0) and to
     // plain declarations otherwise -- confirm against utilities/macros.hpp.
 483   static JvmtiDeferredEvent compiled_method_load_event(nmethod* nm)
 484     NOT_JVMTI_RETURN_(JvmtiDeferredEvent());
 485   static JvmtiDeferredEvent compiled_method_unload_event(nmethod* nm,
 486       jmethodID id, const void* code) NOT_JVMTI_RETURN_(JvmtiDeferredEvent());
 487   static JvmtiDeferredEvent dynamic_code_generated_event(
 488       const char* name, const void* begin, const void* end)
 489           NOT_JVMTI_RETURN_(JvmtiDeferredEvent());
 490 
 491   // Actually posts the event.
 492   void post() NOT_JVMTI_RETURN;
 493 };
 494 
 495 /**
 496  * Events enqueued on this queue wake up the Service thread which dequeues
 497  * and posts the events.  The Service_lock is required to be held
 498  * when operating on the queue (except for the "pending" events).
 499  */
 500 class JvmtiDeferredEventQueue : AllStatic {
 501   friend class JvmtiDeferredEvent;
 502  private:
       // Singly-linked list node, allocated on the C heap (mtInternal),
       // holding one JvmtiDeferredEvent by value.
 503   class QueueNode : public CHeapObj<mtInternal> {
 504    private:
 505     JvmtiDeferredEvent _event;
 506     QueueNode* _next;
 507 
 508    public:
 509     QueueNode(const JvmtiDeferredEvent& event)
 510       : _event(event), _next(NULL) {}
 511 
 512     const JvmtiDeferredEvent& event() const { return _event; }
 513     QueueNode* next() const { return _next; }
 514 
 515     void set_next(QueueNode* next) { _next = next; }
 516   };
 517 
 518   static QueueNode* _queue_head;             // Hold Service_lock to access
 519   static QueueNode* _queue_tail;             // Hold Service_lock to access
       // NOTE(review): 'volatile QueueNode*' declares a pointer to volatile
       // QueueNode, not a volatile pointer ('QueueNode* volatile') -- confirm
       // the intended qualifier placement for this CAS-updated list head.
 520   static volatile QueueNode* _pending_list;  // Uses CAS for read/update
 521 
 522   // Transfers events from the _pending_list to the _queue.
 523   static void process_pending_events() NOT_JVMTI_RETURN;
 524 
 525  public:
 526   // Must be holding Service_lock when calling these
 527   static bool has_events() NOT_JVMTI_RETURN_(false);
 528   static void enqueue(const JvmtiDeferredEvent& event) NOT_JVMTI_RETURN;
 529   static JvmtiDeferredEvent dequeue() NOT_JVMTI_RETURN_(JvmtiDeferredEvent());
 530 
 531   // Used to enqueue events without using a lock, for times (such as during
 532   // safepoint) when we can't or don't want to lock the Service_lock.
 533   //
 534   // Events will be held off to the side until there's a call to
 535   // dequeue(), enqueue(), or process_pending_events() (all of which require
 536   // the holding of the Service_lock), and will be enqueued at that time.
 537   static void add_pending_event(const JvmtiDeferredEvent&) NOT_JVMTI_RETURN;
 538 };
 539 
 540 // Utility macro that checks for NULL pointers:
     // NOTE(review): expanding to a bare 'if' statement makes this macro
     // unsafe directly before an 'else'; the conventional do { ... } while (0)
     // wrapper would avoid that -- left unchanged here.
 541 #define NULL_CHECK(X, Y) if ((X) == NULL) { return (Y); }
 542 
 543 #endif // SHARE_VM_PRIMS_JVMTIIMPL_HPP
src/share/vm/prims/jvmtiImpl.hpp
webrev navigation: Index | Unified diffs | Context diffs | Sdiffs | Wdiffs | Patch | New | Old | Previous File | Next File