9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "code/codeHeapState.hpp"
28 #include "compiler/compileBroker.hpp"
29 #include "runtime/sweeper.hpp"
30 #include "utilities/powerOfTwo.hpp"
31
32 // -------------------------
33 // | General Description |
34 // -------------------------
// The CodeHeap state analytics are divided into two parts.
36 // The first part examines the entire CodeHeap and aggregates all
37 // information that is believed useful/important.
38 //
39 // Aggregation condenses the information of a piece of the CodeHeap
40 // (4096 bytes by default) into an analysis granule. These granules
41 // contain enough detail to gain initial insight while keeping the
42 // internal structure sizes in check.
43 //
44 // The second part, which consists of several, independent steps,
45 // prints the previously collected information with emphasis on
46 // various aspects.
47 //
48 // The CodeHeap is a living thing. Therefore, protection against concurrent
214 , "nMethod (active)"
215 , "nMethod (inactive)"
216 , "nMethod (deopt)"
217 , "nMethod (zombie)"
218 , "nMethod (unloaded)"
219 , "runtime stub"
220 , "ricochet stub"
221 , "deopt stub"
222 , "uncommon trap stub"
223 , "exception stub"
224 , "safepoint stub"
225 , "adapter blob"
226 , "MH adapter blob"
227 , "buffer blob"
228 , "lastType"
229 };
// Textual names of the compiler that produced an nmethod (indexed by the compType enum values).
const char* compTypeName[] = { "none", "c1", "c2", "jvmci" };

const unsigned int nSizeDistElements = 31; // logarithmic range growth, max size: 2**32
const unsigned int maxTopSizeBlocks = 50; // upper bound on the "largest blocks" list (TopSizeArray).
const unsigned int tsbStopper = 2 * maxTopSizeBlocks; // sentinel value terminating TopSizeArray index chains.
// Be prepared for ten different CodeHeap segments. Should be enough for a few years.
const unsigned int maxHeaps = 10;
static unsigned int nHeaps = 0; // number of CodeHeapStatArray entries in use — presumably bumped on first sight of a heap; confirm at allocation site.
static struct CodeHeapStat CodeHeapStatArray[maxHeaps]; // per-heap snapshot of the analysis state globals below.

// static struct StatElement *StatArray = NULL;
static StatElement* StatArray = NULL; // aggregated per-granule statistics; reset by discard_StatArray().
static int log2_seg_size = 0; // log2 of seg_size (see exact_log2 use in aggregate()); 0 while uninitialized.
static size_t seg_size = 0; // CodeHeap segment size of the heap under analysis.
static size_t alloc_granules = 0; // number of granules covered by StatArray.
static size_t granule_size = 0; // bytes of CodeHeap condensed into one analysis granule.
static bool segment_granules = false; // true when granule granularity equals segment granularity.
static unsigned int nBlocks_t1 = 0; // counting "in_use" nmethods only.
static unsigned int nBlocks_t2 = 0; // counting "in_use" nmethods only.
static unsigned int nBlocks_alive = 0; // counting "not_used" and "not_entrant" nmethods only.
static unsigned int nBlocks_dead = 0; // counting "zombie" and "unloaded" methods only.
static unsigned int nBlocks_inconstr = 0; // counting "inconstruction" nmethods only. This is a transient state.
static unsigned int nBlocks_unloaded = 0; // counting "unloaded" nmethods only. This is a transient state.
static unsigned int nBlocks_stub = 0;
254
479
480 void CodeHeapState::discard_StatArray(outputStream* out) {
481 if (StatArray != NULL) {
482 delete StatArray;
483 StatArray = NULL;
484 alloc_granules = 0;
485 granule_size = 0;
486 }
487 }
488
489 void CodeHeapState::discard_FreeArray(outputStream* out) {
490 if (FreeArray != NULL) {
491 delete[] FreeArray;
492 FreeArray = NULL;
493 alloc_freeBlocks = 0;
494 }
495 }
496
497 void CodeHeapState::discard_TopSizeArray(outputStream* out) {
498 if (TopSizeArray != NULL) {
499 delete[] TopSizeArray;
500 TopSizeArray = NULL;
501 alloc_topSizeBlocks = 0;
502 used_topSizeBlocks = 0;
503 }
504 }
505
506 void CodeHeapState::discard_SizeDistArray(outputStream* out) {
507 if (SizeDistributionArray != NULL) {
508 delete[] SizeDistributionArray;
509 SizeDistributionArray = NULL;
510 }
511 }
512
513 // Discard all allocated internal data structures.
514 // This should be done after an analysis session is completed.
515 void CodeHeapState::discard(outputStream* out, CodeHeap* heap) {
516 if (!initialization_complete) {
517 return;
518 }
572 BUFFEREDSTREAM_FLUSH("")
573 }
574 get_HeapStatGlobals(out, heapName);
575
576
577 // Since we are (and must be) analyzing the CodeHeap contents under the CodeCache_lock,
578 // all heap information is "constant" and can be safely extracted/calculated before we
579 // enter the while() loop. Actually, the loop will only be iterated once.
580 char* low_bound = heap->low_boundary();
581 size_t size = heap->capacity();
582 size_t res_size = heap->max_capacity();
583 seg_size = heap->segment_size();
584 log2_seg_size = seg_size == 0 ? 0 : exact_log2(seg_size); // This is a global static value.
585
586 if (seg_size == 0) {
587 printBox(ast, '-', "Heap not fully initialized yet, segment size is zero for segment ", heapName);
588 BUFFEREDSTREAM_FLUSH("")
589 return;
590 }
591
592 if (!CodeCache_lock->owned_by_self()) {
593 printBox(ast, '-', "aggregate function called without holding the CodeCache_lock for ", heapName);
594 BUFFEREDSTREAM_FLUSH("")
595 return;
596 }
597
598 // Calculate granularity of analysis (and output).
599 // The CodeHeap is managed (allocated) in segments (units) of CodeCacheSegmentSize.
600 // The CodeHeap can become fairly large, in particular in productive real-life systems.
601 //
602 // It is often neither feasible nor desirable to aggregate the data with the highest possible
603 // level of detail, i.e. inspecting and printing each segment on its own.
604 //
605 // The granularity parameter allows to specify the level of detail available in the analysis.
606 // It must be a positive multiple of the segment size and should be selected such that enough
607 // detail is provided while, at the same time, the printed output does not explode.
608 //
609 // By manipulating the granularity value, we enforce that at least min_granules units
610 // of analysis are available. We also enforce an upper limit of max_granules units to
611 // keep the amount of allocated storage in check.
612 //
613 // Finally, we adjust the granularity such that each granule covers at most 64k-1 segments.
736 if (ix_beg > ix_end) {
737 insane = true; ast->print_cr("Sanity check: end index (%d) lower than begin index (%d)", ix_end, ix_beg);
738 }
739 if (insane) {
740 BUFFEREDSTREAM_FLUSH("")
741 continue;
742 }
743
744 if (h->free()) {
745 nBlocks_free++;
746 freeSpace += hb_bytelen;
747 if (hb_bytelen > maxFreeSize) {
748 maxFreeSize = hb_bytelen;
749 maxFreeBlock = h;
750 }
751 } else {
752 update_SizeDistArray(out, hb_len);
753 nBlocks_used++;
754 usedSpace += hb_bytelen;
755 CodeBlob* cb = (CodeBlob*)heap->find_start(h);
756 if (cb != NULL) {
757 cbType = get_cbType(cb);
758 if (cb->is_nmethod()) {
759 compile_id = ((nmethod*)cb)->compile_id();
760 comp_lvl = (CompLevel)((nmethod*)cb)->comp_level();
761 if (((nmethod*)cb)->is_compiled_by_c1()) {
762 cType = c1;
763 }
764 if (((nmethod*)cb)->is_compiled_by_c2()) {
765 cType = c2;
766 }
767 if (((nmethod*)cb)->is_compiled_by_jvmci()) {
768 cType = jvmci;
769 }
770 switch (cbType) {
771 case nMethod_inuse: { // only for executable methods!!!
772 // space for these cbs is accounted for later.
773 int temperature = ((nmethod*)cb)->hotness_counter();
774 hotnessAccumulator += temperature;
775 n_methods++;
776 maxTemp = (temperature > maxTemp) ? temperature : maxTemp;
777 minTemp = (temperature < minTemp) ? temperature : minTemp;
778 break;
779 }
780 case nMethod_notused:
781 nBlocks_alive++;
782 nBlocks_disconn++;
783 aliveSpace += hb_bytelen;
784 disconnSpace += hb_bytelen;
785 break;
786 case nMethod_notentrant: // equivalent to nMethod_alive
787 nBlocks_alive++;
788 nBlocks_notentr++;
789 aliveSpace += hb_bytelen;
790 notentrSpace += hb_bytelen;
791 break;
792 case nMethod_unloaded:
793 nBlocks_unloaded++;
795 break;
796 case nMethod_dead:
797 nBlocks_dead++;
798 deadSpace += hb_bytelen;
799 break;
800 case nMethod_inconstruction:
801 nBlocks_inconstr++;
802 inconstrSpace += hb_bytelen;
803 break;
804 default:
805 break;
806 }
807 }
808
809 //------------------------------------------
810 //---< register block in TopSizeArray >---
811 //------------------------------------------
812 if (alloc_topSizeBlocks > 0) {
813 if (used_topSizeBlocks == 0) {
814 TopSizeArray[0].start = h;
815 TopSizeArray[0].len = hb_len;
816 TopSizeArray[0].index = tsbStopper;
817 TopSizeArray[0].compiler = cType;
818 TopSizeArray[0].level = comp_lvl;
819 TopSizeArray[0].type = cbType;
820 currMax = hb_len;
821 currMin = hb_len;
822 currMin_ix = 0;
823 used_topSizeBlocks++;
824 // This check roughly cuts 5000 iterations (JVM98, mixed, dbg, termination stats):
825 } else if ((used_topSizeBlocks < alloc_topSizeBlocks) && (hb_len < currMin)) {
826 //---< all blocks in list are larger, but there is room left in array >---
827 TopSizeArray[currMin_ix].index = used_topSizeBlocks;
828 TopSizeArray[used_topSizeBlocks].start = h;
829 TopSizeArray[used_topSizeBlocks].len = hb_len;
830 TopSizeArray[used_topSizeBlocks].index = tsbStopper;
831 TopSizeArray[used_topSizeBlocks].compiler = cType;
832 TopSizeArray[used_topSizeBlocks].level = comp_lvl;
833 TopSizeArray[used_topSizeBlocks].type = cbType;
834 currMin = hb_len;
835 currMin_ix = used_topSizeBlocks;
836 used_topSizeBlocks++;
837 } else {
838 // This check cuts total_iterations by a factor of 6 (JVM98, mixed, dbg, termination stats):
839 // We don't need to search the list if we know beforehand that the current block size is
840 // smaller than the currently recorded minimum and there is no free entry left in the list.
841 if (!((used_topSizeBlocks == alloc_topSizeBlocks) && (hb_len <= currMin))) {
842 if (currMax < hb_len) {
843 currMax = hb_len;
844 }
845 unsigned int i;
846 unsigned int prev_i = tsbStopper;
847 unsigned int limit_i = 0;
848 for (i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
849 if (limit_i++ >= alloc_topSizeBlocks) {
850 insane = true; break; // emergency exit
851 }
852 if (i >= used_topSizeBlocks) {
853 insane = true; break; // emergency exit
854 }
855 total_iterations++;
856 if (TopSizeArray[i].len < hb_len) {
857 //---< We want to insert here, element <i> is smaller than the current one >---
858 if (used_topSizeBlocks < alloc_topSizeBlocks) { // still room for a new entry to insert
859 // old entry gets moved to the next free element of the array.
860 // That's necessary to keep the entry for the largest block at index 0.
861 // This move might cause the current minimum to be moved to another place
862 if (i == currMin_ix) {
863 assert(TopSizeArray[i].len == currMin, "sort error");
864 currMin_ix = used_topSizeBlocks;
865 }
866 memcpy((void*)&TopSizeArray[used_topSizeBlocks], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
867 TopSizeArray[i].start = h;
868 TopSizeArray[i].len = hb_len;
869 TopSizeArray[i].index = used_topSizeBlocks;
870 TopSizeArray[i].compiler = cType;
871 TopSizeArray[i].level = comp_lvl;
872 TopSizeArray[i].type = cbType;
873 used_topSizeBlocks++;
874 } else { // no room for new entries, current block replaces entry for smallest block
875 //---< Find last entry (entry for smallest remembered block) >---
876 unsigned int j = i;
877 unsigned int prev_j = tsbStopper;
878 unsigned int limit_j = 0;
879 while (TopSizeArray[j].index != tsbStopper) {
880 if (limit_j++ >= alloc_topSizeBlocks) {
881 insane = true; break; // emergency exit
882 }
883 if (j >= used_topSizeBlocks) {
884 insane = true; break; // emergency exit
885 }
886 total_iterations++;
887 prev_j = j;
888 j = TopSizeArray[j].index;
889 }
890 if (!insane) {
891 if (prev_j == tsbStopper) {
892 //---< Above while loop did not iterate, we already are the min entry >---
893 //---< We have to just replace the smallest entry >---
894 currMin = hb_len;
895 currMin_ix = j;
896 TopSizeArray[j].start = h;
897 TopSizeArray[j].len = hb_len;
898 TopSizeArray[j].index = tsbStopper; // already set!!
899 TopSizeArray[j].compiler = cType;
900 TopSizeArray[j].level = comp_lvl;
901 TopSizeArray[j].type = cbType;
902 } else {
903 //---< second-smallest entry is now smallest >---
904 TopSizeArray[prev_j].index = tsbStopper;
905 currMin = TopSizeArray[prev_j].len;
906 currMin_ix = prev_j;
907 //---< smallest entry gets overwritten >---
908 memcpy((void*)&TopSizeArray[j], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
909 TopSizeArray[i].start = h;
910 TopSizeArray[i].len = hb_len;
911 TopSizeArray[i].index = j;
912 TopSizeArray[i].compiler = cType;
913 TopSizeArray[i].level = comp_lvl;
914 TopSizeArray[i].type = cbType;
915 }
916 } // insane
917 }
918 break;
919 }
920 prev_i = i;
921 }
922 if (insane) {
923 // Note: regular analysis could probably continue by resetting "insane" flag.
924 out->print_cr("Possible loop in TopSizeBlocks list detected. Analysis aborted.");
925 discard_TopSizeArray(out);
926 }
927 }
928 }
929 }
930 //----------------------------------------------
931 //---< END register block in TopSizeArray >---
932 //----------------------------------------------
933 } else {
934 nBlocks_zomb++;
935 }
936
937 if (ix_beg == ix_end) {
938 StatArray[ix_beg].type = cbType;
939 switch (cbType) {
940 case nMethod_inuse:
941 highest_compilation_id = (highest_compilation_id >= compile_id) ? highest_compilation_id : compile_id;
942 if (comp_lvl < CompLevel_full_optimization) {
943 nBlocks_t1++;
944 t1Space += hb_bytelen;
945 StatArray[ix_beg].t1_count++;
946 StatArray[ix_beg].t1_space += (unsigned short)hb_len;
947 StatArray[ix_beg].t1_age = StatArray[ix_beg].t1_age < compile_id ? compile_id : StatArray[ix_beg].t1_age;
948 } else {
949 nBlocks_t2++;
1258 ast->print_cr("Free block count mismatch could not be resolved.");
1259 ast->print_cr("Try to run \"aggregate\" function to update counters");
1260 }
1261 BUFFEREDSTREAM_FLUSH("")
1262
1263 //---< discard old array and update global values >---
1264 discard_FreeArray(out);
1265 set_HeapStatGlobals(out, heapName);
1266 return;
1267 }
1268
1269 //---< calculate and fill remaining fields >---
1270 if (FreeArray != NULL) {
1271 // This loop is intentionally printing directly to "out".
1272 // It should not print anything, anyway.
1273 for (unsigned int ix = 0; ix < alloc_freeBlocks-1; ix++) {
1274 size_t lenSum = 0;
1275 FreeArray[ix].gap = (unsigned int)((address)FreeArray[ix+1].start - ((address)FreeArray[ix].start + FreeArray[ix].len));
1276 for (HeapBlock *h = heap->next_block(FreeArray[ix].start); (h != NULL) && (h != FreeArray[ix+1].start); h = heap->next_block(h)) {
1277 CodeBlob *cb = (CodeBlob*)(heap->find_start(h));
1278 if ((cb != NULL) && !cb->is_nmethod()) {
1279 FreeArray[ix].stubs_in_gap = true;
1280 }
1281 FreeArray[ix].n_gapBlocks++;
1282 lenSum += h->length()<<log2_seg_size;
1283 if (((address)h < ((address)FreeArray[ix].start+FreeArray[ix].len)) || (h >= FreeArray[ix+1].start)) {
1284 out->print_cr("unsorted occupied CodeHeap block found @ %p, gap interval [%p, %p)", h, (address)FreeArray[ix].start+FreeArray[ix].len, FreeArray[ix+1].start);
1285 }
1286 }
1287 if (lenSum != FreeArray[ix].gap) {
1288 out->print_cr("Length mismatch for gap between FreeBlk[%d] and FreeBlk[%d]. Calculated: %d, accumulated: %d.", ix, ix+1, FreeArray[ix].gap, (unsigned int)lenSum);
1289 }
1290 }
1291 }
1292 set_HeapStatGlobals(out, heapName);
1293
1294 printBox(ast, '=', "C O D E H E A P A N A L Y S I S C O M P L E T E for segment ", heapName);
1295 BUFFEREDSTREAM_FLUSH("\n")
1296 }
1297
1298
1310 BUFFEREDSTREAM_DECL(ast, out)
1311
1312 {
1313 printBox(ast, '=', "U S E D S P A C E S T A T I S T I C S for ", heapName);
1314 ast->print_cr("Note: The Top%d list of the largest used blocks associates method names\n"
1315 " and other identifying information with the block size data.\n"
1316 "\n"
1317 " Method names are dynamically retrieved from the code cache at print time.\n"
1318 " Due to the living nature of the code cache and because the CodeCache_lock\n"
1319 " is not continuously held, the displayed name might be wrong or no name\n"
1320 " might be found at all. The likelihood for that to happen increases\n"
1321 " over time passed between analysis and print step.\n", used_topSizeBlocks);
1322 BUFFEREDSTREAM_FLUSH_LOCKED("\n")
1323 }
1324
1325 //----------------------------
1326 //-- Print Top Used Blocks --
1327 //----------------------------
1328 {
1329 char* low_bound = heap->low_boundary();
1330 bool have_CodeCache_lock = CodeCache_lock->owned_by_self();
1331
1332 printBox(ast, '-', "Largest Used Blocks in ", heapName);
1333 print_blobType_legend(ast);
1334
1335 ast->fill_to(51);
1336 ast->print("%4s", "blob");
1337 ast->fill_to(56);
1338 ast->print("%9s", "compiler");
1339 ast->fill_to(66);
1340 ast->print_cr("%6s", "method");
1341 ast->print_cr("%18s %13s %17s %4s %9s %5s %s", "Addr(module) ", "offset", "size", "type", " type lvl", " temp", "Name");
1342 BUFFEREDSTREAM_FLUSH_LOCKED("")
1343
1344 //---< print Top Ten Used Blocks >---
1345 if (used_topSizeBlocks > 0) {
1346 unsigned int printed_topSizeBlocks = 0;
1347 for (unsigned int i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
1348 printed_topSizeBlocks++;
1349 nmethod* nm = NULL;
1350 const char* blob_name = "unnamed blob or blob name unavailable";
1351 // heap->find_start() is safe. Only works on _segmap.
1352 // Returns NULL or void*. Returned CodeBlob may be uninitialized.
1353 HeapBlock* heapBlock = TopSizeArray[i].start;
1354 CodeBlob* this_blob = (CodeBlob*)(heap->find_start(heapBlock));
1355 bool blob_is_safe = blob_access_is_safe(this_blob, NULL);
1356 if (blob_is_safe) {
1357 //---< access these fields only if we own the CodeCache_lock >---
1358 if (have_CodeCache_lock) {
1359 blob_name = this_blob->name();
1360 nm = this_blob->as_nmethod_or_null();
1361 }
1362 //---< blob address >---
1363 ast->print(INTPTR_FORMAT, p2i(this_blob));
1364 ast->fill_to(19);
1365 //---< blob offset from CodeHeap begin >---
1366 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
1367 ast->fill_to(33);
1368 } else {
1369 //---< block address >---
1370 ast->print(INTPTR_FORMAT, p2i(TopSizeArray[i].start));
1371 ast->fill_to(19);
1372 //---< block offset from CodeHeap begin >---
1373 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)TopSizeArray[i].start-low_bound));
1374 ast->fill_to(33);
1375 }
1376
1377 //---< print size, name, and signature (for nMethods) >---
1378 // access nmethod and Method fields only if we own the CodeCache_lock.
1379 // This fact is implicitly transported via nm != NULL.
1380 if (CompiledMethod::nmethod_access_is_safe(nm)) {
1381 ResourceMark rm;
1382 Method* method = nm->method();
1383 if (nm->is_in_use()) {
1384 blob_name = method->name_and_sig_as_C_string();
1385 }
1386 if (nm->is_not_entrant()) {
1387 blob_name = method->name_and_sig_as_C_string();
1388 }
1389 //---< nMethod size in hex >---
1390 unsigned int total_size = nm->total_size();
1391 ast->print(PTR32_FORMAT, total_size);
1392 ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
1393 ast->fill_to(51);
1394 ast->print(" %c", blobTypeChar[TopSizeArray[i].type]);
1395 //---< compiler information >---
1396 ast->fill_to(56);
1397 ast->print("%5s %3d", compTypeName[TopSizeArray[i].compiler], TopSizeArray[i].level);
1398 //---< method temperature >---
1399 ast->fill_to(67);
1400 ast->print("%5d", nm->hotness_counter());
1401 //---< name and signature >---
1402 ast->fill_to(67+6);
1403 if (nm->is_not_installed()) {
1404 ast->print(" not (yet) installed method ");
1405 }
1406 if (nm->is_zombie()) {
1407 ast->print(" zombie method ");
1408 }
1409 ast->print("%s", blob_name);
1410 } else {
1411 //---< block size in hex >---
1412 ast->print(PTR32_FORMAT, (unsigned int)(TopSizeArray[i].len<<log2_seg_size));
1413 ast->print("(" SIZE_FORMAT_W(4) "K)", (TopSizeArray[i].len<<log2_seg_size)/K);
1414 //---< no compiler information >---
1415 ast->fill_to(56);
1416 //---< name and signature >---
1417 ast->fill_to(67+6);
1418 ast->print("%s", blob_name);
1419 }
1420 ast->cr();
1421 BUFFEREDSTREAM_FLUSH_AUTO("")
1422 }
1423 if (used_topSizeBlocks != printed_topSizeBlocks) {
1424 ast->print_cr("used blocks: %d, printed blocks: %d", used_topSizeBlocks, printed_topSizeBlocks);
1425 for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
1426 ast->print_cr(" TopSizeArray[%d].index = %d, len = %d", i, TopSizeArray[i].index, TopSizeArray[i].len);
1427 BUFFEREDSTREAM_FLUSH_AUTO("")
1428 }
1429 }
1430 BUFFEREDSTREAM_FLUSH("\n\n")
1431 }
1432 }
1433
1434 //-----------------------------
1435 //-- Print Usage Histogram --
1436 //-----------------------------
1437
1438 if (SizeDistributionArray != NULL) {
2179 }
2180
2181
2182 void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
2183 if (!initialization_complete) {
2184 return;
2185 }
2186
2187 const char* heapName = get_heapName(heap);
2188 get_HeapStatGlobals(out, heapName);
2189
2190 if ((StatArray == NULL) || (alloc_granules == 0)) {
2191 return;
2192 }
2193 BUFFEREDSTREAM_DECL(ast, out)
2194
2195 unsigned int granules_per_line = 128;
2196 char* low_bound = heap->low_boundary();
2197 CodeBlob* last_blob = NULL;
2198 bool name_in_addr_range = true;
2199 bool have_CodeCache_lock = CodeCache_lock->owned_by_self();
2200
2201 //---< print at least 128K per block (i.e. between headers) >---
2202 if (granules_per_line*granule_size < 128*K) {
2203 granules_per_line = (unsigned int)((128*K)/granule_size);
2204 }
2205
2206 printBox(ast, '=', "M E T H O D N A M E S for ", heapName);
2207 ast->print_cr(" Method names are dynamically retrieved from the code cache at print time.\n"
2208 " Due to the living nature of the code heap and because the CodeCache_lock\n"
2209 " is not continuously held, the displayed name might be wrong or no name\n"
2210 " might be found at all. The likelihood for that to happen increases\n"
2211 " over time passed between aggregtion and print steps.\n");
2212 BUFFEREDSTREAM_FLUSH_LOCKED("")
2213
2214 for (unsigned int ix = 0; ix < alloc_granules; ix++) {
2215 //---< print a new blob on a new line >---
2216 if (ix%granules_per_line == 0) {
2217 if (!name_in_addr_range) {
2218 ast->print_cr("No methods, blobs, or stubs found in this address range");
2219 }
2220 name_in_addr_range = false;
2221
2222 size_t end_ix = (ix+granules_per_line <= alloc_granules) ? ix+granules_per_line : alloc_granules;
2223 ast->cr();
2224 ast->print_cr("--------------------------------------------------------------------");
2225 ast->print_cr("Address range [" INTPTR_FORMAT "," INTPTR_FORMAT "), " SIZE_FORMAT "k", p2i(low_bound+ix*granule_size), p2i(low_bound + end_ix*granule_size), (end_ix - ix)*granule_size/(size_t)K);
2226 ast->print_cr("--------------------------------------------------------------------");
2227 BUFFEREDSTREAM_FLUSH_AUTO("")
2228 }
2229 // Only check granule if it contains at least one blob.
2230 unsigned int nBlobs = StatArray[ix].t1_count + StatArray[ix].t2_count + StatArray[ix].tx_count +
2231 StatArray[ix].stub_count + StatArray[ix].dead_count;
2232 if (nBlobs > 0 ) {
2233 for (unsigned int is = 0; is < granule_size; is+=(unsigned int)seg_size) {
2234 // heap->find_start() is safe. Only works on _segmap.
2235 // Returns NULL or void*. Returned CodeBlob may be uninitialized.
2236 char* this_seg = low_bound + ix*granule_size + is;
2237 CodeBlob* this_blob = (CodeBlob*)(heap->find_start(this_seg));
2238 bool blob_is_safe = blob_access_is_safe(this_blob, NULL);
2239 // blob could have been flushed, freed, and merged.
2240 // this_blob < last_blob is an indicator for that.
2241 if (blob_is_safe && (this_blob > last_blob)) {
2242 last_blob = this_blob;
2243
2244 //---< get type and name >---
2245 blobType cbType = noType;
2246 if (segment_granules) {
2247 cbType = (blobType)StatArray[ix].type;
2248 } else {
2249 //---< access these fields only if we own the CodeCache_lock >---
2250 if (have_CodeCache_lock) {
2251 cbType = get_cbType(this_blob);
2252 }
2253 }
2254
2255 //---< access these fields only if we own the CodeCache_lock >---
2256 const char* blob_name = "<unavailable>";
2257 nmethod* nm = NULL;
2258 if (have_CodeCache_lock) {
2259 blob_name = this_blob->name();
2260 nm = this_blob->as_nmethod_or_null();
2261 // this_blob->name() could return NULL if no name was given to CTOR. Inlined, maybe invisible on stack
2262 if ((blob_name == NULL) || !os::is_readable_pointer(blob_name)) {
2263 blob_name = "<unavailable>";
2264 }
2265 }
2266
2267 //---< print table header for new print range >---
2268 if (!name_in_addr_range) {
2269 name_in_addr_range = true;
2270 ast->fill_to(51);
2271 ast->print("%9s", "compiler");
2272 ast->fill_to(61);
2273 ast->print_cr("%6s", "method");
2274 ast->print_cr("%18s %13s %17s %9s %5s %18s %s", "Addr(module) ", "offset", "size", " type lvl", " temp", "blobType ", "Name");
2275 BUFFEREDSTREAM_FLUSH_AUTO("")
2276 }
2277
2278 //---< print line prefix (address and offset from CodeHeap start) >---
2279 ast->print(INTPTR_FORMAT, p2i(this_blob));
2280 ast->fill_to(19);
2281 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
2282 ast->fill_to(33);
2283
2284 // access nmethod and Method fields only if we own the CodeCache_lock.
2285 // This fact is implicitly transported via nm != NULL.
2286 if (CompiledMethod::nmethod_access_is_safe(nm)) {
2287 Method* method = nm->method();
2288 ResourceMark rm;
2289 //---< collect all data to locals as quickly as possible >---
2290 unsigned int total_size = nm->total_size();
2291 int hotness = nm->hotness_counter();
2292 bool get_name = (cbType == nMethod_inuse) || (cbType == nMethod_notused);
2293 //---< nMethod size in hex >---
2294 ast->print(PTR32_FORMAT, total_size);
2295 ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
2296 //---< compiler information >---
2297 ast->fill_to(51);
2298 ast->print("%5s %3d", compTypeName[StatArray[ix].compiler], StatArray[ix].level);
2299 //---< method temperature >---
2300 ast->fill_to(62);
2301 ast->print("%5d", hotness);
2302 //---< name and signature >---
2303 ast->fill_to(62+6);
2304 ast->print("%s", blobTypeName[cbType]);
2305 ast->fill_to(82+6);
2306 if (cbType == nMethod_dead) {
2472 ast->print("|");
2473 }
2474 ast->cr();
2475
2476 // can't use BUFFEREDSTREAM_FLUSH_IF("", 512) here.
2477 // can't use this expression. bufferedStream::capacity() does not exist.
2478 // if ((ast->capacity() - ast->size()) < 512) {
2479 // Assume instead that default bufferedStream capacity (4K) was used.
2480 if (ast->size() > 3*K) {
2481 ttyLocker ttyl;
2482 out->print("%s", ast->as_string());
2483 ast->reset();
2484 }
2485
2486 ast->print(INTPTR_FORMAT, p2i(low_bound + ix*granule_size));
2487 ast->fill_to(19);
2488 ast->print("(+" PTR32_FORMAT "): |", (unsigned int)(ix*granule_size));
2489 }
2490 }
2491
// Classify the given CodeBlob into one of the blobType categories used by the
// CodeHeap analysis. Returns noType for NULL or unreadable blobs, and also
// when nmethod state would have to be inspected without the CodeCache_lock.
CodeHeapState::blobType CodeHeapState::get_cbType(CodeBlob* cb) {
  if ((cb != NULL) && os::is_readable_pointer(cb)) {
    // Stub/blob kinds are determined from the blob header alone (no lock needed).
    if (cb->is_runtime_stub()) return runtimeStub;
    if (cb->is_deoptimization_stub()) return deoptimizationStub;
    if (cb->is_uncommon_trap_stub()) return uncommonTrapStub;
    if (cb->is_exception_stub()) return exceptionStub;
    if (cb->is_safepoint_stub()) return safepointStub;
    if (cb->is_adapter_blob()) return adapterBlob;
    if (cb->is_method_handles_adapter_blob()) return mh_adapterBlob;
    if (cb->is_buffer_blob()) return bufferBlob;

    //---< access these fields only if we own the CodeCache_lock >---
    // Should be ensured by caller. aggregate() and print_names() do that.
    if (CodeCache_lock->owned_by_self()) {
      nmethod* nm = cb->as_nmethod_or_null();
      if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb.
        // The order of these checks matters: an nmethod may satisfy more than
        // one predicate at a time; the first match determines the category.
        if (nm->is_not_installed()) return nMethod_inconstruction;
        if (nm->is_zombie()) return nMethod_dead;
        if (nm->is_unloaded()) return nMethod_unloaded;
        if (nm->is_in_use()) return nMethod_inuse;
        if (nm->is_alive() && !(nm->is_not_entrant())) return nMethod_notused;
        if (nm->is_alive()) return nMethod_alive;
        return nMethod_dead; // not alive: treat as dead.
      }
    }
  }
  return noType;
}
2520
2521 bool CodeHeapState::blob_access_is_safe(CodeBlob* this_blob, CodeBlob* prev_blob) {
2522 return (this_blob != NULL) && // a blob must have been found, obviously
2523 ((this_blob == prev_blob) || (prev_blob == NULL)) && // when re-checking, the same blob must have been found
2524 (this_blob->header_size() >= 0) &&
2525 (this_blob->relocation_size() >= 0) &&
2526 ((address)this_blob + this_blob->header_size() == (address)(this_blob->relocation_begin())) &&
2527 ((address)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (address)(this_blob->content_begin())) &&
2528 os::is_readable_pointer((address)(this_blob->relocation_begin())) &&
2529 os::is_readable_pointer(this_blob->content_begin());
2530 }
|
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "code/codeHeapState.hpp"
28 #include "compiler/compileBroker.hpp"
29 #include "runtime/safepoint.hpp"
30 #include "runtime/sweeper.hpp"
31 #include "utilities/powerOfTwo.hpp"
32
33 // -------------------------
34 // | General Description |
35 // -------------------------
// The CodeHeap state analytics are divided into two parts.
37 // The first part examines the entire CodeHeap and aggregates all
38 // information that is believed useful/important.
39 //
40 // Aggregation condenses the information of a piece of the CodeHeap
41 // (4096 bytes by default) into an analysis granule. These granules
42 // contain enough detail to gain initial insight while keeping the
43 // internal structure sizes in check.
44 //
45 // The second part, which consists of several, independent steps,
46 // prints the previously collected information with emphasis on
47 // various aspects.
48 //
49 // The CodeHeap is a living thing. Therefore, protection against concurrent
215 , "nMethod (active)"
216 , "nMethod (inactive)"
217 , "nMethod (deopt)"
218 , "nMethod (zombie)"
219 , "nMethod (unloaded)"
220 , "runtime stub"
221 , "ricochet stub"
222 , "deopt stub"
223 , "uncommon trap stub"
224 , "exception stub"
225 , "safepoint stub"
226 , "adapter blob"
227 , "MH adapter blob"
228 , "buffer blob"
229 , "lastType"
230 };
// Textual names of the compiler that produced an nmethod (indexed by the compType enum values).
const char* compTypeName[] = { "none", "c1", "c2", "jvmci" };

const unsigned int nSizeDistElements = 31; // logarithmic range growth, max size: 2**32
const unsigned int maxTopSizeBlocks = 100; // upper bound on the "largest blocks" list (TopSizeArray).
const unsigned int tsbStopper = 2 * maxTopSizeBlocks; // sentinel value terminating TopSizeArray index chains.
// Be prepared for ten different CodeHeap segments. Should be enough for a few years.
const unsigned int maxHeaps = 10;
static unsigned int nHeaps = 0; // number of CodeHeapStatArray entries in use — presumably bumped on first sight of a heap; confirm at allocation site.
static struct CodeHeapStat CodeHeapStatArray[maxHeaps]; // per-heap snapshot of the analysis state globals below.

// static struct StatElement *StatArray = NULL;
static StatElement* StatArray = NULL; // aggregated per-granule statistics; reset by discard_StatArray().
static int log2_seg_size = 0; // log2 of seg_size; 0 while uninitialized.
static size_t seg_size = 0; // CodeHeap segment size of the heap under analysis.
static size_t alloc_granules = 0; // number of granules covered by StatArray.
static size_t granule_size = 0; // bytes of CodeHeap condensed into one analysis granule.
static bool segment_granules = false; // true when granule granularity equals segment granularity.
static unsigned int nBlocks_t1 = 0; // counting "in_use" nmethods only.
static unsigned int nBlocks_t2 = 0; // counting "in_use" nmethods only.
static unsigned int nBlocks_alive = 0; // counting "not_used" and "not_entrant" nmethods only.
static unsigned int nBlocks_dead = 0; // counting "zombie" and "unloaded" methods only.
static unsigned int nBlocks_inconstr = 0; // counting "inconstruction" nmethods only. This is a transient state.
static unsigned int nBlocks_unloaded = 0; // counting "unloaded" nmethods only. This is a transient state.
static unsigned int nBlocks_stub = 0;
255
480
481 void CodeHeapState::discard_StatArray(outputStream* out) {
482 if (StatArray != NULL) {
483 delete StatArray;
484 StatArray = NULL;
485 alloc_granules = 0;
486 granule_size = 0;
487 }
488 }
489
490 void CodeHeapState::discard_FreeArray(outputStream* out) {
491 if (FreeArray != NULL) {
492 delete[] FreeArray;
493 FreeArray = NULL;
494 alloc_freeBlocks = 0;
495 }
496 }
497
498 void CodeHeapState::discard_TopSizeArray(outputStream* out) {
499 if (TopSizeArray != NULL) {
500 for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
501 if (TopSizeArray[i].blob_name != NULL) {
502 os::free((void*)TopSizeArray[i].blob_name);
503 }
504 }
505 delete[] TopSizeArray;
506 TopSizeArray = NULL;
507 alloc_topSizeBlocks = 0;
508 used_topSizeBlocks = 0;
509 }
510 }
511
512 void CodeHeapState::discard_SizeDistArray(outputStream* out) {
513 if (SizeDistributionArray != NULL) {
514 delete[] SizeDistributionArray;
515 SizeDistributionArray = NULL;
516 }
517 }
518
519 // Discard all allocated internal data structures.
520 // This should be done after an analysis session is completed.
521 void CodeHeapState::discard(outputStream* out, CodeHeap* heap) {
522 if (!initialization_complete) {
523 return;
524 }
578 BUFFEREDSTREAM_FLUSH("")
579 }
580 get_HeapStatGlobals(out, heapName);
581
582
583 // Since we are (and must be) analyzing the CodeHeap contents under the CodeCache_lock,
584 // all heap information is "constant" and can be safely extracted/calculated before we
585 // enter the while() loop. Actually, the loop will only be iterated once.
586 char* low_bound = heap->low_boundary();
587 size_t size = heap->capacity();
588 size_t res_size = heap->max_capacity();
589 seg_size = heap->segment_size();
590 log2_seg_size = seg_size == 0 ? 0 : exact_log2(seg_size); // This is a global static value.
591
592 if (seg_size == 0) {
593 printBox(ast, '-', "Heap not fully initialized yet, segment size is zero for segment ", heapName);
594 BUFFEREDSTREAM_FLUSH("")
595 return;
596 }
597
598 if (!holding_required_locks()) {
599 printBox(ast, '-', "Must be at safepoint or hold Compile_lock and CodeCache_lock when calling aggregate function for ", heapName);
600 BUFFEREDSTREAM_FLUSH("")
601 return;
602 }
603
604 // Calculate granularity of analysis (and output).
605 // The CodeHeap is managed (allocated) in segments (units) of CodeCacheSegmentSize.
606 // The CodeHeap can become fairly large, in particular in productive real-life systems.
607 //
608 // It is often neither feasible nor desirable to aggregate the data with the highest possible
609 // level of detail, i.e. inspecting and printing each segment on its own.
610 //
611 // The granularity parameter allows to specify the level of detail available in the analysis.
612 // It must be a positive multiple of the segment size and should be selected such that enough
613 // detail is provided while, at the same time, the printed output does not explode.
614 //
615 // By manipulating the granularity value, we enforce that at least min_granules units
616 // of analysis are available. We also enforce an upper limit of max_granules units to
617 // keep the amount of allocated storage in check.
618 //
619 // Finally, we adjust the granularity such that each granule covers at most 64k-1 segments.
742 if (ix_beg > ix_end) {
743 insane = true; ast->print_cr("Sanity check: end index (%d) lower than begin index (%d)", ix_end, ix_beg);
744 }
745 if (insane) {
746 BUFFEREDSTREAM_FLUSH("")
747 continue;
748 }
749
750 if (h->free()) {
751 nBlocks_free++;
752 freeSpace += hb_bytelen;
753 if (hb_bytelen > maxFreeSize) {
754 maxFreeSize = hb_bytelen;
755 maxFreeBlock = h;
756 }
757 } else {
758 update_SizeDistArray(out, hb_len);
759 nBlocks_used++;
760 usedSpace += hb_bytelen;
761 CodeBlob* cb = (CodeBlob*)heap->find_start(h);
762 cbType = get_cbType(cb); // Will check for cb == NULL and other safety things.
763 if (cbType != noType) {
764 const char* blob_name = os::strdup(cb->name());
765 unsigned int nm_size = 0;
766 int temperature = 0;
767 nmethod* nm = cb->as_nmethod_or_null();
768 if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb.
769 ResourceMark rm;
770 Method* method = nm->method();
771 if (nm->is_in_use()) {
772 blob_name = os::strdup(method->name_and_sig_as_C_string());
773 }
774 if (nm->is_not_entrant()) {
775 blob_name = os::strdup(method->name_and_sig_as_C_string());
776 }
777
778 nm_size = nm->total_size();
779 compile_id = nm->compile_id();
780 comp_lvl = (CompLevel)(nm->comp_level());
781 if (nm->is_compiled_by_c1()) {
782 cType = c1;
783 }
784 if (nm->is_compiled_by_c2()) {
785 cType = c2;
786 }
787 if (nm->is_compiled_by_jvmci()) {
788 cType = jvmci;
789 }
790 switch (cbType) {
791 case nMethod_inuse: { // only for executable methods!!!
792 // space for these cbs is accounted for later.
793 temperature = nm->hotness_counter();
794 hotnessAccumulator += temperature;
795 n_methods++;
796 maxTemp = (temperature > maxTemp) ? temperature : maxTemp;
797 minTemp = (temperature < minTemp) ? temperature : minTemp;
798 break;
799 }
800 case nMethod_notused:
801 nBlocks_alive++;
802 nBlocks_disconn++;
803 aliveSpace += hb_bytelen;
804 disconnSpace += hb_bytelen;
805 break;
806 case nMethod_notentrant: // equivalent to nMethod_alive
807 nBlocks_alive++;
808 nBlocks_notentr++;
809 aliveSpace += hb_bytelen;
810 notentrSpace += hb_bytelen;
811 break;
812 case nMethod_unloaded:
813 nBlocks_unloaded++;
815 break;
816 case nMethod_dead:
817 nBlocks_dead++;
818 deadSpace += hb_bytelen;
819 break;
820 case nMethod_inconstruction:
821 nBlocks_inconstr++;
822 inconstrSpace += hb_bytelen;
823 break;
824 default:
825 break;
826 }
827 }
828
829 //------------------------------------------
830 //---< register block in TopSizeArray >---
831 //------------------------------------------
832 if (alloc_topSizeBlocks > 0) {
833 if (used_topSizeBlocks == 0) {
834 TopSizeArray[0].start = h;
835 TopSizeArray[0].blob_name = blob_name;
836 TopSizeArray[0].len = hb_len;
837 TopSizeArray[0].index = tsbStopper;
838 TopSizeArray[0].nm_size = nm_size;
839 TopSizeArray[0].temperature = temperature;
840 TopSizeArray[0].compiler = cType;
841 TopSizeArray[0].level = comp_lvl;
842 TopSizeArray[0].type = cbType;
843 currMax = hb_len;
844 currMin = hb_len;
845 currMin_ix = 0;
846 used_topSizeBlocks++;
847 blob_name = NULL; // indicate blob_name was consumed
848 // This check roughly cuts 5000 iterations (JVM98, mixed, dbg, termination stats):
849 } else if ((used_topSizeBlocks < alloc_topSizeBlocks) && (hb_len < currMin)) {
850 //---< all blocks in list are larger, but there is room left in array >---
851 TopSizeArray[currMin_ix].index = used_topSizeBlocks;
852 TopSizeArray[used_topSizeBlocks].start = h;
853 TopSizeArray[used_topSizeBlocks].blob_name = blob_name;
854 TopSizeArray[used_topSizeBlocks].len = hb_len;
855 TopSizeArray[used_topSizeBlocks].index = tsbStopper;
856 TopSizeArray[used_topSizeBlocks].nm_size = nm_size;
857 TopSizeArray[used_topSizeBlocks].temperature = temperature;
858 TopSizeArray[used_topSizeBlocks].compiler = cType;
859 TopSizeArray[used_topSizeBlocks].level = comp_lvl;
860 TopSizeArray[used_topSizeBlocks].type = cbType;
861 currMin = hb_len;
862 currMin_ix = used_topSizeBlocks;
863 used_topSizeBlocks++;
864 blob_name = NULL; // indicate blob_name was consumed
865 } else {
866 // This check cuts total_iterations by a factor of 6 (JVM98, mixed, dbg, termination stats):
867 // We don't need to search the list if we know beforehand that the current block size is
868 // smaller than the currently recorded minimum and there is no free entry left in the list.
869 if (!((used_topSizeBlocks == alloc_topSizeBlocks) && (hb_len <= currMin))) {
870 if (currMax < hb_len) {
871 currMax = hb_len;
872 }
873 unsigned int i;
874 unsigned int prev_i = tsbStopper;
875 unsigned int limit_i = 0;
876 for (i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
877 if (limit_i++ >= alloc_topSizeBlocks) {
878 insane = true; break; // emergency exit
879 }
880 if (i >= used_topSizeBlocks) {
881 insane = true; break; // emergency exit
882 }
883 total_iterations++;
884 if (TopSizeArray[i].len < hb_len) {
885 //---< We want to insert here, element <i> is smaller than the current one >---
886 if (used_topSizeBlocks < alloc_topSizeBlocks) { // still room for a new entry to insert
887 // old entry gets moved to the next free element of the array.
888 // That's necessary to keep the entry for the largest block at index 0.
889 // This move might cause the current minimum to be moved to another place
890 if (i == currMin_ix) {
891 assert(TopSizeArray[i].len == currMin, "sort error");
892 currMin_ix = used_topSizeBlocks;
893 }
894 memcpy((void*)&TopSizeArray[used_topSizeBlocks], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
895 TopSizeArray[i].start = h;
896 TopSizeArray[i].blob_name = blob_name;
897 TopSizeArray[i].len = hb_len;
898 TopSizeArray[i].index = used_topSizeBlocks;
899 TopSizeArray[i].nm_size = nm_size;
900 TopSizeArray[i].temperature = temperature;
901 TopSizeArray[i].compiler = cType;
902 TopSizeArray[i].level = comp_lvl;
903 TopSizeArray[i].type = cbType;
904 used_topSizeBlocks++;
905 blob_name = NULL; // indicate blob_name was consumed
906 } else { // no room for new entries, current block replaces entry for smallest block
907 //---< Find last entry (entry for smallest remembered block) >---
908 // We either want to insert right before the smallest entry, which is when <i>
909 // indexes the smallest entry. We then just overwrite the smallest entry.
910 // What's more likely:
911 // We want to insert somewhere in the list. The smallest entry (@<j>) then falls off the cliff.
912 // The element at the insert point <i> takes it's slot. The second-smallest entry now becomes smallest.
913 // Data of the current block is filled in at index <i>.
914 unsigned int j = i;
915 unsigned int prev_j = tsbStopper;
916 unsigned int limit_j = 0;
917 while (TopSizeArray[j].index != tsbStopper) {
918 if (limit_j++ >= alloc_topSizeBlocks) {
919 insane = true; break; // emergency exit
920 }
921 if (j >= used_topSizeBlocks) {
922 insane = true; break; // emergency exit
923 }
924 total_iterations++;
925 prev_j = j;
926 j = TopSizeArray[j].index;
927 }
928 if (!insane) {
929 if (TopSizeArray[j].blob_name != NULL) {
930 os::free((void*)TopSizeArray[j].blob_name);
931 }
932 if (prev_j == tsbStopper) {
933 //---< Above while loop did not iterate, we already are the min entry >---
934 //---< We have to just replace the smallest entry >---
935 currMin = hb_len;
936 currMin_ix = j;
937 TopSizeArray[j].start = h;
938 TopSizeArray[j].blob_name = blob_name;
939 TopSizeArray[j].len = hb_len;
940 TopSizeArray[j].index = tsbStopper; // already set!!
941 TopSizeArray[i].nm_size = nm_size;
942 TopSizeArray[i].temperature = temperature;
943 TopSizeArray[j].compiler = cType;
944 TopSizeArray[j].level = comp_lvl;
945 TopSizeArray[j].type = cbType;
946 } else {
947 //---< second-smallest entry is now smallest >---
948 TopSizeArray[prev_j].index = tsbStopper;
949 currMin = TopSizeArray[prev_j].len;
950 currMin_ix = prev_j;
951 //---< previously smallest entry gets overwritten >---
952 memcpy((void*)&TopSizeArray[j], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
953 TopSizeArray[i].start = h;
954 TopSizeArray[i].blob_name = blob_name;
955 TopSizeArray[i].len = hb_len;
956 TopSizeArray[i].index = j;
957 TopSizeArray[i].nm_size = nm_size;
958 TopSizeArray[i].temperature = temperature;
959 TopSizeArray[i].compiler = cType;
960 TopSizeArray[i].level = comp_lvl;
961 TopSizeArray[i].type = cbType;
962 }
963 blob_name = NULL; // indicate blob_name was consumed
964 } // insane
965 }
966 break;
967 }
968 prev_i = i;
969 }
970 if (insane) {
971 // Note: regular analysis could probably continue by resetting "insane" flag.
972 out->print_cr("Possible loop in TopSizeBlocks list detected. Analysis aborted.");
973 discard_TopSizeArray(out);
974 }
975 }
976 }
977 }
978 if (blob_name != NULL) {
979 os::free((void*)blob_name);
980 blob_name = NULL;
981 }
982 //----------------------------------------------
983 //---< END register block in TopSizeArray >---
984 //----------------------------------------------
985 } else {
986 nBlocks_zomb++;
987 }
988
989 if (ix_beg == ix_end) {
990 StatArray[ix_beg].type = cbType;
991 switch (cbType) {
992 case nMethod_inuse:
993 highest_compilation_id = (highest_compilation_id >= compile_id) ? highest_compilation_id : compile_id;
994 if (comp_lvl < CompLevel_full_optimization) {
995 nBlocks_t1++;
996 t1Space += hb_bytelen;
997 StatArray[ix_beg].t1_count++;
998 StatArray[ix_beg].t1_space += (unsigned short)hb_len;
999 StatArray[ix_beg].t1_age = StatArray[ix_beg].t1_age < compile_id ? compile_id : StatArray[ix_beg].t1_age;
1000 } else {
1001 nBlocks_t2++;
1310 ast->print_cr("Free block count mismatch could not be resolved.");
1311 ast->print_cr("Try to run \"aggregate\" function to update counters");
1312 }
1313 BUFFEREDSTREAM_FLUSH("")
1314
1315 //---< discard old array and update global values >---
1316 discard_FreeArray(out);
1317 set_HeapStatGlobals(out, heapName);
1318 return;
1319 }
1320
1321 //---< calculate and fill remaining fields >---
1322 if (FreeArray != NULL) {
1323 // This loop is intentionally printing directly to "out".
1324 // It should not print anything, anyway.
1325 for (unsigned int ix = 0; ix < alloc_freeBlocks-1; ix++) {
1326 size_t lenSum = 0;
1327 FreeArray[ix].gap = (unsigned int)((address)FreeArray[ix+1].start - ((address)FreeArray[ix].start + FreeArray[ix].len));
1328 for (HeapBlock *h = heap->next_block(FreeArray[ix].start); (h != NULL) && (h != FreeArray[ix+1].start); h = heap->next_block(h)) {
1329 CodeBlob *cb = (CodeBlob*)(heap->find_start(h));
1330 if ((cb != NULL) && os::is_readable_pointer(cb) && !cb->is_nmethod()) { // checks equivalent to those in get_cbType()
1331 FreeArray[ix].stubs_in_gap = true;
1332 }
1333 FreeArray[ix].n_gapBlocks++;
1334 lenSum += h->length()<<log2_seg_size;
1335 if (((address)h < ((address)FreeArray[ix].start+FreeArray[ix].len)) || (h >= FreeArray[ix+1].start)) {
1336 out->print_cr("unsorted occupied CodeHeap block found @ %p, gap interval [%p, %p)", h, (address)FreeArray[ix].start+FreeArray[ix].len, FreeArray[ix+1].start);
1337 }
1338 }
1339 if (lenSum != FreeArray[ix].gap) {
1340 out->print_cr("Length mismatch for gap between FreeBlk[%d] and FreeBlk[%d]. Calculated: %d, accumulated: %d.", ix, ix+1, FreeArray[ix].gap, (unsigned int)lenSum);
1341 }
1342 }
1343 }
1344 set_HeapStatGlobals(out, heapName);
1345
1346 printBox(ast, '=', "C O D E H E A P A N A L Y S I S C O M P L E T E for segment ", heapName);
1347 BUFFEREDSTREAM_FLUSH("\n")
1348 }
1349
1350
1362 BUFFEREDSTREAM_DECL(ast, out)
1363
1364 {
1365 printBox(ast, '=', "U S E D S P A C E S T A T I S T I C S for ", heapName);
1366 ast->print_cr("Note: The Top%d list of the largest used blocks associates method names\n"
1367 " and other identifying information with the block size data.\n"
1368 "\n"
1369 " Method names are dynamically retrieved from the code cache at print time.\n"
1370 " Due to the living nature of the code cache and because the CodeCache_lock\n"
1371 " is not continuously held, the displayed name might be wrong or no name\n"
1372 " might be found at all. The likelihood for that to happen increases\n"
1373 " over time passed between analysis and print step.\n", used_topSizeBlocks);
1374 BUFFEREDSTREAM_FLUSH_LOCKED("\n")
1375 }
1376
1377 //----------------------------
1378 //-- Print Top Used Blocks --
1379 //----------------------------
1380 {
1381 char* low_bound = heap->low_boundary();
1382
1383 printBox(ast, '-', "Largest Used Blocks in ", heapName);
1384 print_blobType_legend(ast);
1385
1386 ast->fill_to(51);
1387 ast->print("%4s", "blob");
1388 ast->fill_to(56);
1389 ast->print("%9s", "compiler");
1390 ast->fill_to(66);
1391 ast->print_cr("%6s", "method");
1392 ast->print_cr("%18s %13s %17s %4s %9s %5s %s", "Addr(module) ", "offset", "size", "type", " type lvl", " temp", "Name");
1393 BUFFEREDSTREAM_FLUSH_LOCKED("")
1394
1395 //---< print Top Ten Used Blocks >---
1396 if (used_topSizeBlocks > 0) {
1397 unsigned int printed_topSizeBlocks = 0;
1398 for (unsigned int i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
1399 printed_topSizeBlocks++;
1400 if (TopSizeArray[i].blob_name == NULL) {
1401 TopSizeArray[i].blob_name = os::strdup("unnamed blob or blob name unavailable");
1402 }
1403 // heap->find_start() is safe. Only works on _segmap.
1404 // Returns NULL or void*. Returned CodeBlob may be uninitialized.
1405 HeapBlock* heapBlock = TopSizeArray[i].start;
1406 CodeBlob* this_blob = (CodeBlob*)(heap->find_start(heapBlock));
1407 if (this_blob != NULL) {
1408 //---< access these fields only if we own the CodeCache_lock >---
1409 //---< blob address >---
1410 ast->print(INTPTR_FORMAT, p2i(this_blob));
1411 ast->fill_to(19);
1412 //---< blob offset from CodeHeap begin >---
1413 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
1414 ast->fill_to(33);
1415 } else {
1416 //---< block address >---
1417 ast->print(INTPTR_FORMAT, p2i(TopSizeArray[i].start));
1418 ast->fill_to(19);
1419 //---< block offset from CodeHeap begin >---
1420 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)TopSizeArray[i].start-low_bound));
1421 ast->fill_to(33);
1422 }
1423
1424 //---< print size, name, and signature (for nMethods) >---
1425 bool is_nmethod = TopSizeArray[i].nm_size > 0;
1426 if (is_nmethod) {
1427 //---< nMethod size in hex >---
1428 ast->print(PTR32_FORMAT, TopSizeArray[i].nm_size);
1429 ast->print("(" SIZE_FORMAT_W(4) "K)", TopSizeArray[i].nm_size/K);
1430 ast->fill_to(51);
1431 ast->print(" %c", blobTypeChar[TopSizeArray[i].type]);
1432 //---< compiler information >---
1433 ast->fill_to(56);
1434 ast->print("%5s %3d", compTypeName[TopSizeArray[i].compiler], TopSizeArray[i].level);
1435 //---< method temperature >---
1436 ast->fill_to(67);
1437 ast->print("%5d", TopSizeArray[i].temperature);
1438 //---< name and signature >---
1439 ast->fill_to(67+6);
1440 if (TopSizeArray[i].type == nMethod_inconstruction) {
1441 ast->print(" not (yet) installed method ");
1442 }
1443 if (TopSizeArray[i].type == nMethod_dead) {
1444 ast->print(" zombie method ");
1445 }
1446 ast->print("%s", TopSizeArray[i].blob_name);
1447 } else {
1448 //---< block size in hex >---
1449 ast->print(PTR32_FORMAT, (unsigned int)(TopSizeArray[i].len<<log2_seg_size));
1450 ast->print("(" SIZE_FORMAT_W(4) "K)", (TopSizeArray[i].len<<log2_seg_size)/K);
1451 //---< no compiler information >---
1452 ast->fill_to(56);
1453 //---< name and signature >---
1454 ast->fill_to(67+6);
1455 ast->print("%s", TopSizeArray[i].blob_name);
1456 }
1457 ast->cr();
1458 BUFFEREDSTREAM_FLUSH_AUTO("")
1459 }
1460 if (used_topSizeBlocks != printed_topSizeBlocks) {
1461 ast->print_cr("used blocks: %d, printed blocks: %d", used_topSizeBlocks, printed_topSizeBlocks);
1462 for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
1463 ast->print_cr(" TopSizeArray[%d].index = %d, len = %d", i, TopSizeArray[i].index, TopSizeArray[i].len);
1464 BUFFEREDSTREAM_FLUSH_AUTO("")
1465 }
1466 }
1467 BUFFEREDSTREAM_FLUSH("\n\n")
1468 }
1469 }
1470
1471 //-----------------------------
1472 //-- Print Usage Histogram --
1473 //-----------------------------
1474
1475 if (SizeDistributionArray != NULL) {
2216 }
2217
2218
2219 void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
2220 if (!initialization_complete) {
2221 return;
2222 }
2223
2224 const char* heapName = get_heapName(heap);
2225 get_HeapStatGlobals(out, heapName);
2226
2227 if ((StatArray == NULL) || (alloc_granules == 0)) {
2228 return;
2229 }
2230 BUFFEREDSTREAM_DECL(ast, out)
2231
2232 unsigned int granules_per_line = 128;
2233 char* low_bound = heap->low_boundary();
2234 CodeBlob* last_blob = NULL;
2235 bool name_in_addr_range = true;
2236 bool have_locks = holding_required_locks();
2237
2238 //---< print at least 128K per block (i.e. between headers) >---
2239 if (granules_per_line*granule_size < 128*K) {
2240 granules_per_line = (unsigned int)((128*K)/granule_size);
2241 }
2242
2243 printBox(ast, '=', "M E T H O D N A M E S for ", heapName);
2244 ast->print_cr(" Method names are dynamically retrieved from the code cache at print time.\n"
2245 " Due to the living nature of the code heap and because the CodeCache_lock\n"
2246 " is not continuously held, the displayed name might be wrong or no name\n"
2247 " might be found at all. The likelihood for that to happen increases\n"
2248 " over time passed between aggregation and print steps.\n");
2249 BUFFEREDSTREAM_FLUSH_LOCKED("")
2250
2251 for (unsigned int ix = 0; ix < alloc_granules; ix++) {
2252 //---< print a new blob on a new line >---
2253 if (ix%granules_per_line == 0) {
2254 if (!name_in_addr_range) {
2255 ast->print_cr("No methods, blobs, or stubs found in this address range");
2256 }
2257 name_in_addr_range = false;
2258
2259 size_t end_ix = (ix+granules_per_line <= alloc_granules) ? ix+granules_per_line : alloc_granules;
2260 ast->cr();
2261 ast->print_cr("--------------------------------------------------------------------");
2262 ast->print_cr("Address range [" INTPTR_FORMAT "," INTPTR_FORMAT "), " SIZE_FORMAT "k", p2i(low_bound+ix*granule_size), p2i(low_bound + end_ix*granule_size), (end_ix - ix)*granule_size/(size_t)K);
2263 ast->print_cr("--------------------------------------------------------------------");
2264 BUFFEREDSTREAM_FLUSH_AUTO("")
2265 }
2266 // Only check granule if it contains at least one blob.
2267 unsigned int nBlobs = StatArray[ix].t1_count + StatArray[ix].t2_count + StatArray[ix].tx_count +
2268 StatArray[ix].stub_count + StatArray[ix].dead_count;
2269 if (nBlobs > 0 ) {
2270 for (unsigned int is = 0; is < granule_size; is+=(unsigned int)seg_size) {
2271 // heap->find_start() is safe. Only works on _segmap.
2272 // Returns NULL or void*. Returned CodeBlob may be uninitialized.
2273 char* this_seg = low_bound + ix*granule_size + is;
2274 CodeBlob* this_blob = (CodeBlob*)(heap->find_start(this_seg));
2275 bool blob_is_safe = blob_access_is_safe(this_blob, NULL);
2276 // blob could have been flushed, freed, and merged.
2277 // this_blob < last_blob is an indicator for that.
2278 if (blob_is_safe && (this_blob > last_blob)) {
2279 last_blob = this_blob;
2280
2281 //---< get type and name >---
2282 blobType cbType = noType;
2283 if (segment_granules) {
2284 cbType = (blobType)StatArray[ix].type;
2285 } else {
2286 //---< access these fields only if we own the CodeCache_lock >---
2287 if (have_locks) {
2288 cbType = get_cbType(this_blob);
2289 }
2290 }
2291
2292 //---< access these fields only if we own the CodeCache_lock >---
2293 const char* blob_name = "<unavailable>";
2294 nmethod* nm = NULL;
2295 if (have_locks) {
2296 blob_name = this_blob->name();
2297 nm = this_blob->as_nmethod_or_null();
2298 // this_blob->name() could return NULL if no name was given to CTOR. Inlined, maybe invisible on stack
2299 if ((blob_name == NULL) || !os::is_readable_pointer(blob_name)) {
2300 blob_name = "<unavailable>";
2301 }
2302 }
2303
2304 //---< print table header for new print range >---
2305 if (!name_in_addr_range) {
2306 name_in_addr_range = true;
2307 ast->fill_to(51);
2308 ast->print("%9s", "compiler");
2309 ast->fill_to(61);
2310 ast->print_cr("%6s", "method");
2311 ast->print_cr("%18s %13s %17s %9s %5s %18s %s", "Addr(module) ", "offset", "size", " type lvl", " temp", "blobType ", "Name");
2312 BUFFEREDSTREAM_FLUSH_AUTO("")
2313 }
2314
2315 //---< print line prefix (address and offset from CodeHeap start) >---
2316 ast->print(INTPTR_FORMAT, p2i(this_blob));
2317 ast->fill_to(19);
2318 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
2319 ast->fill_to(33);
2320
2321 // access nmethod and Method fields only if we own the CodeCache_lock.
2322 // This fact is implicitly transported via nm != NULL.
2323 if (nmethod::access_is_safe(nm)) {
2324 Method* method = nm->method();
2325 ResourceMark rm;
2326 //---< collect all data to locals as quickly as possible >---
2327 unsigned int total_size = nm->total_size();
2328 int hotness = nm->hotness_counter();
2329 bool get_name = (cbType == nMethod_inuse) || (cbType == nMethod_notused);
2330 //---< nMethod size in hex >---
2331 ast->print(PTR32_FORMAT, total_size);
2332 ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
2333 //---< compiler information >---
2334 ast->fill_to(51);
2335 ast->print("%5s %3d", compTypeName[StatArray[ix].compiler], StatArray[ix].level);
2336 //---< method temperature >---
2337 ast->fill_to(62);
2338 ast->print("%5d", hotness);
2339 //---< name and signature >---
2340 ast->fill_to(62+6);
2341 ast->print("%s", blobTypeName[cbType]);
2342 ast->fill_to(82+6);
2343 if (cbType == nMethod_dead) {
2509 ast->print("|");
2510 }
2511 ast->cr();
2512
2513 // can't use BUFFEREDSTREAM_FLUSH_IF("", 512) here.
2514 // can't use this expression. bufferedStream::capacity() does not exist.
2515 // if ((ast->capacity() - ast->size()) < 512) {
2516 // Assume instead that default bufferedStream capacity (4K) was used.
2517 if (ast->size() > 3*K) {
2518 ttyLocker ttyl;
2519 out->print("%s", ast->as_string());
2520 ast->reset();
2521 }
2522
2523 ast->print(INTPTR_FORMAT, p2i(low_bound + ix*granule_size));
2524 ast->fill_to(19);
2525 ast->print("(+" PTR32_FORMAT "): |", (unsigned int)(ix*granule_size));
2526 }
2527 }
2528
// Find out which blob type we have at hand.
// Return "noType" if anything abnormal is detected.
// The order of the checks below is significant and must be preserved:
// stubs and blobs are classified first (no lock needed), nmethod states
// are narrowed down step by step, each test relying on the previous
// tests having failed.
CodeHeapState::blobType CodeHeapState::get_cbType(CodeBlob* cb) {
  if ((cb != NULL) && os::is_readable_pointer(cb)) {
    // Non-nmethod blob kinds can be determined without holding any locks.
    if (cb->is_runtime_stub()) return runtimeStub;
    if (cb->is_deoptimization_stub()) return deoptimizationStub;
    if (cb->is_uncommon_trap_stub()) return uncommonTrapStub;
    if (cb->is_exception_stub()) return exceptionStub;
    if (cb->is_safepoint_stub()) return safepointStub;
    if (cb->is_adapter_blob()) return adapterBlob;
    if (cb->is_method_handles_adapter_blob()) return mh_adapterBlob;
    if (cb->is_buffer_blob()) return bufferBlob;

    //---< access these fields only if we own CodeCache_lock and Compile_lock >---
    // Should be ensured by caller. aggregate() and print_names() do that.
    if (holding_required_locks()) {
      nmethod* nm = cb->as_nmethod_or_null();
      if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb.
        if (nm->is_not_installed()) return nMethod_inconstruction;
        if (nm->is_zombie()) return nMethod_dead;
        if (nm->is_unloaded()) return nMethod_unloaded;
        if (nm->is_in_use()) return nMethod_inuse;
        // Not in_use but alive and not not_entrant: the "not_used" state.
        if (nm->is_alive() && !(nm->is_not_entrant())) return nMethod_notused;
        // Alive and not_entrant (in_use/not_used already excluded above).
        if (nm->is_alive()) return nMethod_alive;
        // Not alive at all: treat as dead.
        return nMethod_dead;
      }
    }
  }
  return noType;
}
2559
2560 bool CodeHeapState::blob_access_is_safe(CodeBlob* this_blob, CodeBlob* prev_blob) {
2561 return (this_blob != NULL) && // a blob must have been found, obviously
2562 ((this_blob == prev_blob) || (prev_blob == NULL)) && // when re-checking, the same blob must have been found
2563 (this_blob->header_size() >= 0) &&
2564 (this_blob->relocation_size() >= 0) &&
2565 ((address)this_blob + this_blob->header_size() == (address)(this_blob->relocation_begin())) &&
2566 ((address)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (address)(this_blob->content_begin())) &&
2567 os::is_readable_pointer((address)(this_blob->relocation_begin())) &&
2568 os::is_readable_pointer(this_blob->content_begin());
2569 }
2570
2571 bool CodeHeapState::holding_required_locks() {
2572 return SafepointSynchronize::is_at_safepoint() ||
2573 (CodeCache_lock->owned_by_self() && Compile_lock->owned_by_self());
2574 }
|