// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0; // protects global monitor lists
static volatile int gMonitorFreeCount = 0; // # on gFreeList
static volatile int gMonitorPopulation = 0; // # Extant -- in circulation

// Forward declaration for the JavaMonitorInflate event helper used by the
// inflation slow path (definition is elsewhere in this file).
static void post_monitor_inflate_event(EventJavaMonitorInflate&,
                                       const oop,
                                       const ObjectSynchronizer::InflateCause);

// Sentinel oop values stored in the object() field of the header
// ObjectMonitor of each block on gBlockList (slot 0 of a block is a chain
// link, not a real monitor). CHAINMARKER identifies an unclaimed block
// header; CLAIMEDMARKER is CAS-installed over CHAINMARKER by
// ParallelObjectSynchronizerIterator::claim() when a worker claims the
// block, and the iterator's destructor restores CHAINMARKER afterwards.
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))
#define CLAIMEDMARKER (cast_to_oop<intptr_t>(-2))
134
135
136 // =====================> Quick functions
137
138 // The quick_* forms are special fast-path variants used to improve
139 // performance. In the simplest case, a "quick_*" implementation could
140 // simply return false, in which case the caller will perform the necessary
141 // state transitions and call the slow-path form.
142 // The fast-path is designed to handle frequently arising cases in an efficient
143 // manner and is just a degenerate "optimistic" variant of the slow-path.
144 // returns true -- to indicate the call was satisfied.
145 // returns false -- to indicate the call needs the services of the slow-path.
146 // A no-loitering ordinance is in effect for code in the quick_* family
147 // operators: safepoints or indefinite blocking (blocking that might span a
148 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
149 // entry.
150 //
151 // Consider: An interesting optimization is to have the JIT recognize the
152 // following common idiom:
153 // synchronized (someobj) { .... ; notify(); }
1931 : _head(head), _cur(head) {
1932 assert(SafepointSynchronize::is_at_safepoint(), "Must at safepoint");
1933 }
1934
1935 ParallelObjectSynchronizerIterator::~ParallelObjectSynchronizerIterator() {
1936 assert(SafepointSynchronize::is_at_safepoint(), "Must at safepoint");
1937 PaddedEnd<ObjectMonitor>* block = (PaddedEnd<ObjectMonitor>*)_head;
1938 for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
1939 assert(block->object() == CLAIMEDMARKER, "Must be a claimed block");
1940 // Restore chainmarker
1941 block->set_object(CHAINMARKER);
1942 }
1943 }
1944
// Atomically claim one monitor block for the calling worker thread.
// Returns the claimed block, or NULL once the block chain is exhausted.
//
// A block is claimed by CAS-ing the header monitor's object() field from
// CHAINMARKER to CLAIMEDMARKER; whichever thread wins that CAS owns the
// block. The shared cursor _cur is advanced cooperatively: the winner
// returns immediately without moving _cur, and any thread that observes an
// already-claimed block helps push _cur to the next block before retrying.
void* ParallelObjectSynchronizerIterator::claim() {
  PaddedEnd<ObjectMonitor>* my_cur = (PaddedEnd<ObjectMonitor>*)_cur;
  PaddedEnd<ObjectMonitor>* next_block;

  while (true) {
    if (my_cur == NULL) return NULL;  // end of chain: nothing left to claim

    if (my_cur->object() == CHAINMARKER) {
      // Unclaimed block: try to claim it. cas_set_object returns the
      // previous value, so seeing CHAINMARKER back means our CAS won.
      if (my_cur->cas_set_object(CLAIMEDMARKER, CHAINMARKER) == CHAINMARKER) {
        return (void*)my_cur;
      }
    } else {
      // Someone else claimed it (or we lost the CAS race above).
      assert(my_cur->object() == CLAIMEDMARKER, "Must be");
    }

    // Help advance the shared cursor past the claimed block. cmpxchg_ptr
    // returns the previous value of _cur: on failure that is the newer
    // cursor another thread installed, which we scan from next; on success
    // it equals my_cur and the following pass re-reads the moved cursor.
    next_block = (PaddedEnd<ObjectMonitor> *)next(my_cur);
    my_cur = (PaddedEnd<ObjectMonitor> *)Atomic::cmpxchg_ptr(next_block, &_cur, my_cur);
  }
}
1964
1965 bool ParallelObjectSynchronizerIterator::parallel_oops_do(OopClosure* f) {
1966 PaddedEnd<ObjectMonitor>* block = (PaddedEnd<ObjectMonitor>*)claim();
1967 if (block != NULL) {
1968 assert(block->object() == CLAIMEDMARKER, "Must be a claimed block");
1969 for (int i = 1; i < ObjectSynchronizer::_BLOCKSIZE; i++) {
1970 ObjectMonitor* mid = (ObjectMonitor *)&block[i];
1971 if (mid->object() != NULL) {
1972 f->do_oop((oop*)mid->object_addr());
1973 }
1974 }
1975 return true;
1976 }
1977 return false;
1978 }
|
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0; // protects global monitor lists
static volatile int gMonitorFreeCount = 0; // # on gFreeList
static volatile int gMonitorPopulation = 0; // # Extant -- in circulation

// Forward declaration for the JavaMonitorInflate event helper used by the
// inflation slow path (definition is elsewhere in this file).
static void post_monitor_inflate_event(EventJavaMonitorInflate&,
                                       const oop,
                                       const ObjectSynchronizer::InflateCause);

// Sentinel oop value stored in the object() field of the header
// ObjectMonitor of each block on gBlockList, so block headers can be told
// apart from real monitors.
// NOTE(review): CLAIMEDMARKER was dropped from this revision's macros, yet
// asserts further down still reference it -- verify they were updated too.
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))
134
135 // =====================> Quick functions
136
137 // The quick_* forms are special fast-path variants used to improve
138 // performance. In the simplest case, a "quick_*" implementation could
139 // simply return false, in which case the caller will perform the necessary
140 // state transitions and call the slow-path form.
141 // The fast-path is designed to handle frequently arising cases in an efficient
142 // manner and is just a degenerate "optimistic" variant of the slow-path.
143 // returns true -- to indicate the call was satisfied.
144 // returns false -- to indicate the call needs the services of the slow-path.
145 // A no-loitering ordinance is in effect for code in the quick_* family
146 // operators: safepoints or indefinite blocking (blocking that might span a
147 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
148 // entry.
149 //
150 // Consider: An interesting optimization is to have the JIT recognize the
151 // following common idiom:
152 // synchronized (someobj) { .... ; notify(); }
1930 : _head(head), _cur(head) {
1931 assert(SafepointSynchronize::is_at_safepoint(), "Must at safepoint");
1932 }
1933
1934 ParallelObjectSynchronizerIterator::~ParallelObjectSynchronizerIterator() {
1935 assert(SafepointSynchronize::is_at_safepoint(), "Must at safepoint");
1936 PaddedEnd<ObjectMonitor>* block = (PaddedEnd<ObjectMonitor>*)_head;
1937 for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
1938 assert(block->object() == CLAIMEDMARKER, "Must be a claimed block");
1939 // Restore chainmarker
1940 block->set_object(CHAINMARKER);
1941 }
1942 }
1943
// Atomically claim one monitor block for the calling worker thread.
// Returns the claimed block, or NULL once the block chain is exhausted.
//
// Claiming is done purely by advancing the shared _cur cursor with CAS:
// the thread whose cmpxchg moves _cur from a block to its successor owns
// that block. Block header oops are never modified by this scheme.
void* ParallelObjectSynchronizerIterator::claim() {
  PaddedEnd<ObjectMonitor>* my_cur = (PaddedEnd<ObjectMonitor>*)_cur;
  PaddedEnd<ObjectMonitor>* next_block;

  while (true) {
    if (my_cur == NULL) return NULL;  // end of chain: nothing left to claim
    next_block = (PaddedEnd<ObjectMonitor> *)next(my_cur);
    // cmpxchg_ptr returns the previous value of _cur; equality with the
    // value we read means our swap installed next_block.
    void* cas_result = Atomic::cmpxchg_ptr(next_block, &_cur, my_cur);
    if (my_cur == cas_result) {
      // We succeeded.
      return (void*) my_cur;
    } else {
      // We failed. Retry with offending CAS result.
      my_cur = (PaddedEnd<ObjectMonitor>*) cas_result;
    }
  }
}
1961
1962 bool ParallelObjectSynchronizerIterator::parallel_oops_do(OopClosure* f) {
1963 PaddedEnd<ObjectMonitor>* block = (PaddedEnd<ObjectMonitor>*)claim();
1964 if (block != NULL) {
1965 assert(block->object() == CLAIMEDMARKER, "Must be a claimed block");
1966 for (int i = 1; i < ObjectSynchronizer::_BLOCKSIZE; i++) {
1967 ObjectMonitor* mid = (ObjectMonitor *)&block[i];
1968 if (mid->object() != NULL) {
1969 f->do_oop((oop*)mid->object_addr());
1970 }
1971 }
1972 return true;
1973 }
1974 return false;
1975 }
|