/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/checkpoint/constant/jfrThreadGroup.hpp"
#include "jfr/utilities/jfrResourceManager.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/semaphore.hpp"
// to get CONTENT_TYPE defines
#include "tracefiles/traceTypes.hpp"
#include "utilities/growableArray.hpp"

// Scoped guard serializing access to the JfrThreadGroup singleton state.
// The semaphore is initialized with a count of one (see definition below),
// so wait() in the constructor and signal() in the destructor give
// binary-mutex semantics for the guard's scope.
class ThreadGroupExclusiveAccess : public StackObj {
 private:
  static Semaphore _mutex_semaphore;
 public:
  ThreadGroupExclusiveAccess() { _mutex_semaphore.wait(); }
  ~ThreadGroupExclusiveAccess() { _mutex_semaphore.signal(); }
};

Semaphore ThreadGroupExclusiveAccess::_mutex_semaphore(1);
JfrThreadGroup* JfrThreadGroup::_instance = NULL;

// Pairs a strong Handle to a java.lang.ThreadGroup with an optional JNI weak
// global handle to the same object. The weak handle is only created when the
// hierarchy is built outside a safepoint (see populate_thread_group_hierarchy).
// Resource-allocated; the weak handle, if still owned, must be released via
// clear_weak_ref().
class JfrThreadGroupPointers : public ResourceObj {
 private:
  const Handle _thread_group_handle;
  jweak _thread_group_weak_ref;
 public:
  JfrThreadGroupPointers(Handle thread_group_handle, jweak thread_group_weak_ref);
  Handle thread_group_handle() const;
  jweak thread_group_weak_ref() const;
  oopDesc* const thread_group_oop() const;
  jweak transfer_weak_global_handle_ownership();
  void clear_weak_ref();
};

JfrThreadGroupPointers::JfrThreadGroupPointers(Handle thread_group_handle, jweak thread_group_weak_ref) :
  _thread_group_handle(thread_group_handle),
  _thread_group_weak_ref(thread_group_weak_ref) {}

Handle JfrThreadGroupPointers::thread_group_handle() const {
  return _thread_group_handle;
}

jweak JfrThreadGroupPointers::thread_group_weak_ref() const {
  return _thread_group_weak_ref;
}

// Returns the naked thread group oop from the strong Handle; asserts that an
// existing weak handle still resolves to that very same object.
oopDesc* const JfrThreadGroupPointers::thread_group_oop() const {
  assert(_thread_group_weak_ref == NULL ||
         JNIHandles::resolve_non_null(_thread_group_weak_ref) == _thread_group_handle(), "invariant");
  return _thread_group_handle();
}

// Relinquishes ownership of the weak global handle to the caller, leaving
// this object with nothing to clean up in clear_weak_ref().
jweak JfrThreadGroupPointers::transfer_weak_global_handle_ownership() {
  jweak temp = _thread_group_weak_ref;
  _thread_group_weak_ref = NULL;
  return temp;
}

// Destroys the weak global handle unless ownership was transferred away.
void JfrThreadGroupPointers::clear_weak_ref() {
  if (NULL != _thread_group_weak_ref) {
    JNIHandles::destroy_weak_global(_thread_group_weak_ref);
  }
}

// Collects the complete thread group ancestry for a thread into a
// resource-allocated array (immediate group at index 0, root-most ancestor
// last) and iterates it in reverse, i.e. from the root-most ancestor down to
// the immediate thread group.
class JfrThreadGroupsHelper : public ResourceObj {
 private:
  static const int invalid_iterator_pos = -1;
  GrowableArray<JfrThreadGroupPointers*>* _thread_group_hierarchy;
  int _current_iterator_pos;

  int populate_thread_group_hierarchy(const JavaThread* jt, Thread* current);
  JfrThreadGroupPointers& at(int index);

 public:
  JfrThreadGroupsHelper(const JavaThread* jt, Thread* current);
  ~JfrThreadGroupsHelper();
  JfrThreadGroupPointers& next();
  bool is_valid() const;
  bool has_next() const;
};

JfrThreadGroupsHelper::JfrThreadGroupsHelper(const JavaThread* jt, Thread* current) {
  _thread_group_hierarchy = new GrowableArray<JfrThreadGroupPointers*>(10, false, mtTracing);
  // position the iterator at the last (root-most) element; -1 means empty
  _current_iterator_pos = populate_thread_group_hierarchy(jt, current) - 1;
}

JfrThreadGroupsHelper::~JfrThreadGroupsHelper() {
  // iteration must have been exhausted (each element consumed via next())
  assert(_current_iterator_pos == invalid_iterator_pos, "invariant");
  // release any weak global handles whose ownership was not transferred
  for (int i = 0; i < _thread_group_hierarchy->length(); ++i) {
    _thread_group_hierarchy->at(i)->clear_weak_ref();
  }
}

JfrThreadGroupPointers& JfrThreadGroupsHelper::at(int index) {
  assert(_thread_group_hierarchy != NULL, "invariant");
  assert(index > invalid_iterator_pos && index < _thread_group_hierarchy->length(), "invariant");
  return *(_thread_group_hierarchy->at(index));
}

bool JfrThreadGroupsHelper::has_next() const {
  return _current_iterator_pos > invalid_iterator_pos;
}

bool JfrThreadGroupsHelper::is_valid() const {
  return (_thread_group_hierarchy != NULL && _thread_group_hierarchy->length() > 0);
}

// Returns the next element, walking from the root-most thread group
// towards the immediate thread group.
JfrThreadGroupPointers& JfrThreadGroupsHelper::next() {
  assert(is_valid(), "invariant");
  return at(_current_iterator_pos--);
}

/*
 * If not at a safepoint, we create global weak references for
 * all reachable threadgroups for this thread.
 * If we are at a safepoint, the caller is the VMThread during
 * JFR checkpointing. It can use naked oops, because nothing
 * will move before the list of threadgroups is cleared and
 * mutator threads restarted. The threadgroup list is cleared
 * later by the VMThread as one of the final steps in JFR checkpointing
 * (not here).
 *
 * Returns the number of groups appended (0 if the thread has no group).
 */
int JfrThreadGroupsHelper::populate_thread_group_hierarchy(const JavaThread* jt, Thread* current) {
  assert(jt != NULL && jt->is_Java_thread(), "invariant");
  assert(current != NULL, "invariant");
  assert(_thread_group_hierarchy != NULL, "invariant");

  // immediate thread group
  Handle thread_group_handle(current, java_lang_Thread::threadGroup(jt->threadObj()));
  if (thread_group_handle == NULL) {
    return 0;
  }

  const bool use_weak_handles = !SafepointSynchronize::is_at_safepoint();
  jweak thread_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(thread_group_handle) : NULL;

  JfrThreadGroupPointers* thread_group_pointers = new JfrThreadGroupPointers(thread_group_handle, thread_group_weak_ref);
  _thread_group_hierarchy->append(thread_group_pointers);
  // immediate parent thread group
  oop parent_thread_group_obj = java_lang_ThreadGroup::parent(thread_group_handle());
  Handle parent_thread_group_handle(current, parent_thread_group_obj);

  // and check parents parents...
  while (!(parent_thread_group_handle == NULL)) {
    const jweak parent_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(parent_thread_group_handle) : NULL;
    thread_group_pointers = new JfrThreadGroupPointers(parent_thread_group_handle, parent_group_weak_ref);
    _thread_group_hierarchy->append(thread_group_pointers);
    parent_thread_group_obj = java_lang_ThreadGroup::parent(parent_thread_group_handle());
    parent_thread_group_handle = Handle(current, parent_thread_group_obj);
  }
  return _thread_group_hierarchy->length();
}

// Issues the next thread group trace id (ids start at 1; 0 means unassigned).
// Not internally synchronized; call sites hold ThreadGroupExclusiveAccess.
static traceid next_id() {
  static traceid _current_threadgroup_id = 0;
  return ++_current_threadgroup_id;
}

// CHeap-allocated descriptor for a single thread group, keyed by trace id.
// All members are private; JfrThreadGroup manipulates entries as a friend.
class JfrThreadGroup::JfrThreadGroupEntry : public JfrCHeapObj {
  friend class JfrThreadGroup;
 private:
  traceid _thread_group_id;
  traceid _parent_group_id;
  char* _thread_group_name; // utf8 format
  // If an entry is created during a safepoint, the
  // _thread_group_oop contains a direct oop to
  // the java.lang.ThreadGroup object.
  // If an entry is created on javathread exit time (not at safepoint),
  // _thread_group_weak_ref contains a JNI weak global handle
  // indirection to the java.lang.ThreadGroup object.
  // Note: we cannot use a union here since CHECK_UNHANDLED_OOPS makes oop have
  // a ctor which isn't allowed in a union by the SunStudio compiler
  oop _thread_group_oop;
  jweak _thread_group_weak_ref;

  JfrThreadGroupEntry(const char* tgstr, JfrThreadGroupPointers& ptrs);
  ~JfrThreadGroupEntry();

  traceid thread_group_id() const { return _thread_group_id; }
  void set_thread_group_id(traceid tgid) { _thread_group_id = tgid; }

  const char* const thread_group_name() const { return _thread_group_name; }
  void set_thread_group_name(const char* tgname);

  traceid parent_group_id() const { return _parent_group_id; }
  void set_parent_group_id(traceid pgid) { _parent_group_id = pgid; }

  void set_thread_group(JfrThreadGroupPointers& ptrs);
  bool is_equal(const JfrThreadGroupPointers& ptrs) const;
  const oop thread_group() const;
};

JfrThreadGroup::JfrThreadGroupEntry::JfrThreadGroupEntry(const char* tgname, JfrThreadGroupPointers& ptrs) :
  _thread_group_id(0),
  _parent_group_id(0),
  _thread_group_name(NULL),
  _thread_group_oop(NULL),
  _thread_group_weak_ref(NULL) {
  set_thread_group_name(tgname);
  set_thread_group(ptrs);
}

// Releases the copied name and the weak global handle (if this entry owns one).
JfrThreadGroup::JfrThreadGroupEntry::~JfrThreadGroupEntry() {
  if (_thread_group_name != NULL) {
    JfrCHeapObj::free(_thread_group_name, strlen(_thread_group_name) + 1);
  }
  if (_thread_group_weak_ref != NULL) {
    JNIHandles::destroy_weak_global(_thread_group_weak_ref);
  }
}

// Copies tgname (utf8) into a CHeap-allocated, NUL-terminated buffer.
// A NULL tgname leaves the entry without a name.
void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group_name(const char* tgname) {
  assert(_thread_group_name == NULL, "invariant");
  if (tgname != NULL) {
    size_t len = strlen(tgname);
    _thread_group_name = JfrCHeapObj::new_array<char>(len+1);
    strncpy(_thread_group_name, tgname, len);
    _thread_group_name[len] = '\0';
  }
}

// Resolves the thread group oop: through the weak handle when the entry was
// created outside a safepoint, otherwise via the stored direct oop.
const oop JfrThreadGroup::JfrThreadGroupEntry::thread_group() const {
  return _thread_group_weak_ref != NULL ? JNIHandles::resolve(_thread_group_weak_ref) : _thread_group_oop;
}

// Takes over the weak global handle if one exists; otherwise stores the
// naked oop. Exactly one of _thread_group_weak_ref / _thread_group_oop
// is set afterwards.
void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group(JfrThreadGroupPointers& ptrs) {
  _thread_group_weak_ref = ptrs.transfer_weak_global_handle_ownership();
  if (_thread_group_weak_ref == NULL) {
    _thread_group_oop = ptrs.thread_group_oop();
    assert(_thread_group_oop != NULL, "invariant");
  } else {
    _thread_group_oop = NULL;
  }
}

JfrThreadGroup::JfrThreadGroup() : _list(NULL) {
  _list = new (ResourceObj::C_HEAP, mtTracing) GrowableArray<JfrThreadGroupEntry*>(30, true);
}

// Entries hold naked oops when created at a safepoint, so teardown is
// restricted to safepoint operation.
JfrThreadGroup::~JfrThreadGroup() {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  if (_list != NULL) {
    for (int i = 0; i < _list->length(); i++) {
      JfrThreadGroupEntry* e = _list->at(i);
      delete e;
    }
    delete _list;
  }
}

JfrThreadGroup* JfrThreadGroup::instance() {
  return _instance;
}

void JfrThreadGroup::set_instance(JfrThreadGroup* new_instance) {
  _instance = new_instance;
}

// Returns the trace id for jt's immediate thread group, registering the whole
// ancestry as needed. Returns 0 if the thread has no thread group.
traceid JfrThreadGroup::thread_group_id(const JavaThread* jt, Thread* current) {
  ResourceMark rm(current);
  HandleMark hm(current);
  JfrThreadGroupsHelper helper(jt, current);
  return helper.is_valid() ? thread_group_id_internal(helper) : 0;
}

// Convenience overload for a thread resolving its own thread group id.
traceid JfrThreadGroup::thread_group_id(JavaThread* const jt) {
  assert(!JfrStream_lock->owned_by_self(), "holding stream lock but should not hold it here");
  return thread_group_id(jt, jt);
}

// Registers (or looks up) every group in the hierarchy under the exclusive
// access lock. The helper yields ancestors first, so each entry's parent id
// is already known when the entry itself is created.
traceid JfrThreadGroup::thread_group_id_internal(JfrThreadGroupsHelper& helper) {
  ThreadGroupExclusiveAccess lock;
  JfrThreadGroup* tg_instance = instance();
  if (tg_instance == NULL) {
    // lazily create the singleton on first use
    tg_instance = new JfrThreadGroup();
    if (tg_instance == NULL) {
      return 0;
    }
    set_instance(tg_instance);
  }

  JfrThreadGroupEntry* tge = NULL;
  int parent_thread_group_id = 0;
  while (helper.has_next()) {
    JfrThreadGroupPointers& ptrs = helper.next();
    tge = tg_instance->find_entry(ptrs);
    if (NULL == tge) {
      tge = tg_instance->new_entry(ptrs);
      assert(tge != NULL, "invariant");
      tge->set_parent_group_id(parent_thread_group_id);
    }
    parent_thread_group_id = tge->thread_group_id();
  }
  // the last entry in the hierarchy is the immediate thread group
  return tge->thread_group_id();
}

// Object identity comparison between this entry's thread group oop and the
// candidate's oop.
bool JfrThreadGroup::JfrThreadGroupEntry::is_equal(const JfrThreadGroupPointers& ptrs) const {
  return ptrs.thread_group_oop() == thread_group();
}

// Linear scan for an existing entry describing the same thread group object;
// returns NULL when none is registered yet.
JfrThreadGroup::JfrThreadGroupEntry*
JfrThreadGroup::find_entry(const JfrThreadGroupPointers& ptrs) const {
  for (int index = 0; index < _list->length(); ++index) {
    JfrThreadGroupEntry* curtge = _list->at(index);
    if (curtge->is_equal(ptrs)) {
      return curtge;
    }
  }
  return (JfrThreadGroupEntry*) NULL;
}

// Assumes you already searched for the existence
// of a corresponding entry in find_entry().
// Creates and registers a new entry for the given thread group.
// (Assumes the caller already checked find_entry() — see comment above.)
JfrThreadGroup::JfrThreadGroupEntry*
JfrThreadGroup::new_entry(JfrThreadGroupPointers& ptrs) {
  const char* str = NULL;
  typeArrayOop name = java_lang_ThreadGroup::name(ptrs.thread_group_oop());
  if (name != NULL) {
    // convert the group's UTF-16 name characters to a utf8 string
    str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
  }
  JfrThreadGroupEntry* const tge = new JfrThreadGroupEntry(str, ptrs);
  add_entry(tge);
  return tge;
}

// Assigns a fresh trace id to the entry and appends it to the list;
// returns the entry's index in the list.
int JfrThreadGroup::add_entry(JfrThreadGroupEntry* tge) {
  assert(tge != NULL, "attempting to add a null entry!");
  assert(0 == tge->thread_group_id(), "id must be unassigned!");
  tge->set_thread_group_id(next_id());
  return _list->append(tge);
}

// Serializes every registered entry as (id, parent id, name) constants.
void JfrThreadGroup::write_thread_group_entries(JfrCheckpointWriter& writer) const {
  assert(_list != NULL && !_list->is_empty(), "should not need be here!");
  const int number_of_tg_entries = _list->length();
  writer.write_number_of_constants(number_of_tg_entries);
  for (int index = 0; index < number_of_tg_entries; ++index) {
    const JfrThreadGroupEntry* const curtge = _list->at(index);
    writer.write_key(curtge->thread_group_id());
    writer.write(curtge->parent_group_id());
    writer.write(curtge->thread_group_name());
  }
}

// Serializes only the parent chain starting at thread_group_id up to the root.
// The list is scanned backwards, following parent ids as they are found
// (entries for ancestors are appended before their children, see
// thread_group_id_internal). If the id matches no entry, the writer is
// restored to its saved context and nothing is emitted.
void JfrThreadGroup::write_selective_thread_group(JfrCheckpointWriter* writer, traceid thread_group_id) const {
  assert(writer != NULL, "invariant");
  assert(_list != NULL && !_list->is_empty(), "should not need be here!");
  const int number_of_tg_entries = _list->length();

  // save context
  const JfrCheckpointContext ctx = writer->context();
  writer->write_constant_type(CONSTANT_TYPE_THREADGROUP);
  const jlong count_offset = writer->reserve(sizeof(u4)); // Don't know how many yet
  int number_of_entries_written = 0;
  for (int index = number_of_tg_entries - 1; index >= 0; --index) {
    const JfrThreadGroupEntry* const curtge = _list->at(index);
    if (thread_group_id == curtge->thread_group_id()) {
      writer->write_key(curtge->thread_group_id());
      writer->write(curtge->parent_group_id());
      writer->write(curtge->thread_group_name());
      ++number_of_entries_written;
      // continue the backwards scan looking for this entry's parent
      thread_group_id = curtge->parent_group_id();
    }
  }
  if (number_of_entries_written == 0) {
    // nothing to write, restore context
    writer->set_context(ctx);
    return;
  }
  assert(number_of_entries_written > 0, "invariant");
  // patch the reserved slot with the actual number of constants written
  writer->write_number_of_constants(number_of_entries_written, count_offset);
}

// Write out JfrThreadGroup instance and then delete it
void JfrThreadGroup::write(JfrCheckpointWriter& writer) {
  ThreadGroupExclusiveAccess lock;
  JfrThreadGroup* tg_instance = instance();
  assert(tg_instance != NULL, "invariant");
  // ResourceManager takes ownership and deletes the instance at scope exit
  ResourceManager<JfrThreadGroup> tg_handle(tg_instance);
  set_instance(NULL);
  tg_handle->write_thread_group_entries(writer);
}

// for writing a particular thread group
void JfrThreadGroup::write(JfrCheckpointWriter* writer, traceid thread_group_id) {
  assert(writer != NULL, "invariant");
  ThreadGroupExclusiveAccess lock;
  JfrThreadGroup* const tg_instance = instance();
  assert(tg_instance != NULL, "invariant");
  tg_instance->write_selective_thread_group(writer, thread_group_id);
}