867 assert(monitor != NULL, "monitor should be non-null");
868 owner = (address) monitor->owner();
869 }
870
871 if (owner != NULL) {
872 // owning_thread_from_monitor_owner() may also return NULL here
873 return Threads::owning_thread_from_monitor_owner(t_list, owner);
874 }
875
876 // Unlocked case, header in place
877 // Cannot have assertion since this object may have been
878 // locked by another thread when reaching here.
879 // assert(mark.is_neutral(), "sanity check");
880
881 return NULL;
882 }
883
884 // Visitors ...
885
// Visit every ObjectMonitor in the global block list (g_block_list) and
// invoke the closure on each monitor whose object field is set.
void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  // Acquire-load pairs with the release_store of g_block_list in the
  // allocation path, so a block's contents are visible before the block
  // itself becomes reachable from this lock-free traversal.
  PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    // Element [0] is the block header (CHAINMARKER), so only indices
    // [_BLOCKSIZE-1 .. 1] hold real monitors.
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        // Only process with closure if the object is set.
        closure->do_monitor(mid);
      }
    }
    // Block headers are chained via their _next_om field.
    block = (PaddedObjectMonitor*)block->_next_om;
  }
}
901
902 static bool monitors_used_above_threshold() {
903 if (g_om_population == 0) {
904 return false;
905 }
906 int monitors_used = g_om_population - g_om_free_count;
907 int monitor_usage = (monitors_used * 100LL) / g_om_population;
1096
1097 // Element [0] is reserved for global list linkage
1098 temp[0].set_object(CHAINMARKER);
1099
1100 // Consider carving out this thread's current request from the
1101 // block in hand. This avoids some lock traffic and redundant
1102 // list activity.
1103
1104 // Acquire the gListLock to manipulate g_block_list and g_free_list.
1105 // An Oyama-Taura-Yonezawa scheme might be more efficient.
1106 Thread::muxAcquire(&gListLock, "om_alloc(2)");
1107 g_om_population += _BLOCKSIZE-1;
1108 g_om_free_count += _BLOCKSIZE-1;
1109
1110 // Add the new block to the list of extant blocks (g_block_list).
1111 // The very first ObjectMonitor in a block is reserved and dedicated.
1112 // It serves as blocklist "next" linkage.
1113 temp[0]._next_om = g_block_list;
1114 // There are lock-free uses of g_block_list so make sure that
1115 // the previous stores happen before we update g_block_list.
1116 OrderAccess::release_store(&g_block_list, temp);
1117
1118 // Add the new string of ObjectMonitors to the global free list
1119 temp[_BLOCKSIZE - 1]._next_om = g_free_list;
1120 g_free_list = temp + 1;
1121 Thread::muxRelease(&gListLock);
1122 }
1123 }
1124
1125 // Place "m" on the caller's private per-thread om_free_list.
1126 // In practice there's no need to clamp or limit the number of
1127 // monitors on a thread's om_free_list as the only non-allocation time
1128 // we'll call om_release() is to return a monitor to the free list after
1129 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
1130 // accumulate on a thread's free list.
1131 //
1132 // Key constraint: all ObjectMonitors on a thread's free list and the global
1133 // free list must have their object field set to null. This prevents the
1134 // scavenger -- deflate_monitor_list() -- from reclaiming them while we
1135 // are trying to release them.
1136
2147
2148 out->print_cr("%18s %10s %10s %10s",
2149 "Per-Thread Lists:", "InUse", "Free", "Provision");
2150 out->print_cr("================== ========== ========== ==========");
2151
2152 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2153 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
2154 jt->om_in_use_count, jt->om_free_count, jt->om_free_provision);
2155 pop_count += jt->om_in_use_count + jt->om_free_count;
2156 }
2157 return pop_count;
2158 }
2159
2160 #ifndef PRODUCT
2161
2162 // Check if monitor belongs to the monitor cache
2163 // The list is grow-only so it's *relatively* safe to traverse
2164 // the list of extant blocks without taking a lock.
2165
2166 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
2167 PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
2168 while (block != NULL) {
2169 assert(block->object() == CHAINMARKER, "must be a block header");
2170 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
2171 address mon = (address)monitor;
2172 address blk = (address)block;
2173 size_t diff = mon - blk;
2174 assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
2175 return 1;
2176 }
2177 block = (PaddedObjectMonitor*)block->_next_om;
2178 }
2179 return 0;
2180 }
2181
2182 #endif
|
867 assert(monitor != NULL, "monitor should be non-null");
868 owner = (address) monitor->owner();
869 }
870
871 if (owner != NULL) {
872 // owning_thread_from_monitor_owner() may also return NULL here
873 return Threads::owning_thread_from_monitor_owner(t_list, owner);
874 }
875
876 // Unlocked case, header in place
877 // Cannot have assertion since this object may have been
878 // locked by another thread when reaching here.
879 // assert(mark.is_neutral(), "sanity check");
880
881 return NULL;
882 }
883
884 // Visitors ...
885
886 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
887 PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list);
888 while (block != NULL) {
889 assert(block->object() == CHAINMARKER, "must be a block header");
890 for (int i = _BLOCKSIZE - 1; i > 0; i--) {
891 ObjectMonitor* mid = (ObjectMonitor *)(block + i);
892 oop object = (oop)mid->object();
893 if (object != NULL) {
894 // Only process with closure if the object is set.
895 closure->do_monitor(mid);
896 }
897 }
898 block = (PaddedObjectMonitor*)block->_next_om;
899 }
900 }
901
902 static bool monitors_used_above_threshold() {
903 if (g_om_population == 0) {
904 return false;
905 }
906 int monitors_used = g_om_population - g_om_free_count;
907 int monitor_usage = (monitors_used * 100LL) / g_om_population;
1096
1097 // Element [0] is reserved for global list linkage
1098 temp[0].set_object(CHAINMARKER);
1099
1100 // Consider carving out this thread's current request from the
1101 // block in hand. This avoids some lock traffic and redundant
1102 // list activity.
1103
1104 // Acquire the gListLock to manipulate g_block_list and g_free_list.
1105 // An Oyama-Taura-Yonezawa scheme might be more efficient.
1106 Thread::muxAcquire(&gListLock, "om_alloc(2)");
1107 g_om_population += _BLOCKSIZE-1;
1108 g_om_free_count += _BLOCKSIZE-1;
1109
1110 // Add the new block to the list of extant blocks (g_block_list).
1111 // The very first ObjectMonitor in a block is reserved and dedicated.
1112 // It serves as blocklist "next" linkage.
1113 temp[0]._next_om = g_block_list;
1114 // There are lock-free uses of g_block_list so make sure that
1115 // the previous stores happen before we update g_block_list.
1116 Atomic::release_store(&g_block_list, temp);
1117
1118 // Add the new string of ObjectMonitors to the global free list
1119 temp[_BLOCKSIZE - 1]._next_om = g_free_list;
1120 g_free_list = temp + 1;
1121 Thread::muxRelease(&gListLock);
1122 }
1123 }
1124
1125 // Place "m" on the caller's private per-thread om_free_list.
1126 // In practice there's no need to clamp or limit the number of
1127 // monitors on a thread's om_free_list as the only non-allocation time
1128 // we'll call om_release() is to return a monitor to the free list after
1129 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
1130 // accumulate on a thread's free list.
1131 //
1132 // Key constraint: all ObjectMonitors on a thread's free list and the global
1133 // free list must have their object field set to null. This prevents the
1134 // scavenger -- deflate_monitor_list() -- from reclaiming them while we
1135 // are trying to release them.
1136
2147
2148 out->print_cr("%18s %10s %10s %10s",
2149 "Per-Thread Lists:", "InUse", "Free", "Provision");
2150 out->print_cr("================== ========== ========== ==========");
2151
2152 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2153 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
2154 jt->om_in_use_count, jt->om_free_count, jt->om_free_provision);
2155 pop_count += jt->om_in_use_count + jt->om_free_count;
2156 }
2157 return pop_count;
2158 }
2159
2160 #ifndef PRODUCT
2161
2162 // Check if monitor belongs to the monitor cache
2163 // The list is grow-only so it's *relatively* safe to traverse
2164 // the list of extant blocks without taking a lock.
2165
// Return 1 if "monitor" is contained in one of the blocks on
// g_block_list (i.e. it came from the monitor cache), else 0.
int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  // Acquire-load pairs with the release_store that publishes new blocks.
  PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    // Element [0] is the block header, so a pooled monitor lies strictly
    // between the header and the end of the block.
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      // The pointer must land exactly on an element boundary.
      assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedObjectMonitor*)block->_next_om;
  }
  return 0;
}
2181
2182 #endif
|