9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "code/codeHeapState.hpp"
28 #include "compiler/compileBroker.hpp"
29 #include "runtime/sweeper.hpp"
30
31 // -------------------------
32 // | General Description |
33 // -------------------------
34 // The CodeHeap state analytics are divided in two parts.
35 // The first part examines the entire CodeHeap and aggregates all
36 // information that is believed useful/important.
37 //
38 // Aggregation condenses the information of a piece of the CodeHeap
39 // (4096 bytes by default) into an analysis granule. These granules
40 // contain enough detail to gain initial insight while keeping the
// internal structure sizes in check.
42 //
43 // The second part, which consists of several, independent steps,
44 // prints the previously collected information with emphasis on
45 // various aspects.
46 //
47 // The CodeHeap is a living thing. Therefore, protection against concurrent
48 // modification (by acquiring the CodeCache_lock) is necessary. It has
// Plain (unbuffered) flush: print the termination string directly to the
// output stream. Empty or NULL termination strings are silently ignored.
// NOTE(review): these definitions appear to be the fallback variants selected
// by a preprocessor conditional whose #if is outside this view — the #endif
// below closes it; confirm against the matching #if.
#define BUFFEREDSTREAM_FLUSH(_termString)                 \
    if (((_termString) != NULL) && (strlen(_termString) > 0)){\
      _outbuf->print("%s", _termString);                  \
    }

// In the unbuffered case, the remaining-size argument is irrelevant:
// this collapses to a plain flush.
#define BUFFEREDSTREAM_FLUSH_IF(_termString, _remSize)    \
    BUFFEREDSTREAM_FLUSH(_termString)

// Unbuffered case: identical to a plain flush.
#define BUFFEREDSTREAM_FLUSH_AUTO(_termString)            \
    BUFFEREDSTREAM_FLUSH(_termString)

// Unbuffered case: no lock handling needed, identical to a plain flush.
#define BUFFEREDSTREAM_FLUSH_LOCKED(_termString)          \
    BUFFEREDSTREAM_FLUSH(_termString)

// Statistics output only exists for the buffered variant; no-op here.
#define BUFFEREDSTREAM_FLUSH_STAT()
#endif

#define HEX32_FORMAT  "0x%x"  // just a helper format string used below multiple times
212
// One-character tags for the CodeBlob types, indexed by blob type value.
// Must be kept strictly in sync (same order, same length) with blobTypeName below.
const char blobTypeChar[] = {
  ' ', 'C', 'N', 'I', 'X', 'Z', 'U', 'R', '?', 'D', 'T', 'E', 'S', 'A', 'M', 'B', 'L'
};

// Human-readable names for the CodeBlob types. The index order must match
// blobTypeChar above; entry 0 is the "no type" placeholder, the last entry
// is the end-of-range marker.
const char* blobTypeName[] = {
  "noType",                        // index  0, tag ' '
  "nMethod (under construction)",  // index  1, tag 'C'
  "nMethod (active)",              // index  2, tag 'N'
  "nMethod (inactive)",            // index  3, tag 'I'
  "nMethod (deopt)",               // index  4, tag 'X'
  "nMethod (zombie)",              // index  5, tag 'Z'
  "nMethod (unloaded)",            // index  6, tag 'U'
  "runtime stub",                  // index  7, tag 'R'
  "ricochet stub",                 // index  8, tag '?'
  "deopt stub",                    // index  9, tag 'D'
  "uncommon trap stub",            // index 10, tag 'T'
  "exception stub",                // index 11, tag 'E'
  "safepoint stub",                // index 12, tag 'S'
  "adapter blob",                  // index 13, tag 'A'
  "MH adapter blob",               // index 14, tag 'M'
  "buffer blob",                   // index 15, tag 'B'
  "lastType"                       // index 16, tag 'L'
};

// Names of the known compiler kinds, indexed by compiler type value.
const char* compTypeName[] = { "none", "c1", "c2", "jvmci" };
233
// Be prepared for ten different CodeHeap segments. Should be enough for a few years.
const unsigned int nSizeDistElements = 31; // logarithmic range growth, max size: 2**32
const unsigned int maxTopSizeBlocks = 50; // capacity of TopSizeArray (list of largest blocks)
const unsigned int tsbStopper = 2 * maxTopSizeBlocks; // sentinel index marking the end of the TopSizeArray list
const unsigned int maxHeaps = 10; // capacity of CodeHeapStatArray; also the "invalid index" threshold
static unsigned int nHeaps = 0; // number of heap entries currently in use
static struct CodeHeapStat CodeHeapStatArray[maxHeaps]; // persistent per-heap statistics

// The statics below are the "working copy" for the heap currently being
// analyzed/printed. They are loaded from, and stored back into, the
// per-heap entry in CodeHeapStatArray via get_HeapStatGlobals() and
// set_HeapStatGlobals(), respectively.
// static struct StatElement *StatArray = NULL;
static StatElement* StatArray = NULL; // one element per analysis granule
static int log2_seg_size = 0; // log2(seg_size); 0 while seg_size is unknown
static size_t seg_size = 0; // CodeHeap allocation segment size in bytes
static size_t alloc_granules = 0; // number of elements allocated in StatArray
static size_t granule_size = 0; // bytes of CodeHeap covered by one granule
static bool segment_granules = false; // true if granule size equals segment size
static unsigned int nBlocks_t1 = 0; // counting "in_use" nmethods only.
static unsigned int nBlocks_t2 = 0; // counting "in_use" nmethods only.
static unsigned int nBlocks_alive = 0; // counting "not_used" and "not_entrant" nmethods only.
static unsigned int nBlocks_dead = 0; // counting "zombie" and "unloaded" methods only.
static unsigned int nBlocks_inconstr = 0; // counting "inconstruction" nmethods only. This is a transient state.
static unsigned int nBlocks_unloaded = 0; // counting "unloaded" nmethods only. This is a transient state.
static unsigned int nBlocks_stub = 0;

static struct FreeBlk* FreeArray = NULL; // list of free blocks, in address order
static unsigned int alloc_freeBlocks = 0; // number of elements allocated in FreeArray

static struct TopSizeBlk* TopSizeArray = NULL; // largest blocks, linked by index, largest first
static unsigned int alloc_topSizeBlocks = 0; // capacity of TopSizeArray
static unsigned int used_topSizeBlocks = 0; // number of entries currently in use

static struct SizeDistributionElement* SizeDistributionArray = NULL; // histogram of block sizes

// nMethod temperature (hotness) indicators.
static int avgTemp = 0;
static int maxTemp = 0;
static int minTemp = 0;

static unsigned int latest_compilation_id = 0; // obtained from CompileBroker at aggregation time
static volatile bool initialization_complete = false; // guards discard() against running before setup
273
304 } else {
305 nHeaps = 1;
306 CodeHeapStatArray[0].heapName = heapName;
307 return 0; // This is the default index if CodeCache is not segmented.
308 }
309 }
310
311 void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName) {
312 unsigned int ix = findHeapIndex(out, heapName);
313 if (ix < maxHeaps) {
314 StatArray = CodeHeapStatArray[ix].StatArray;
315 seg_size = CodeHeapStatArray[ix].segment_size;
316 log2_seg_size = seg_size == 0 ? 0 : exact_log2(seg_size);
317 alloc_granules = CodeHeapStatArray[ix].alloc_granules;
318 granule_size = CodeHeapStatArray[ix].granule_size;
319 segment_granules = CodeHeapStatArray[ix].segment_granules;
320 nBlocks_t1 = CodeHeapStatArray[ix].nBlocks_t1;
321 nBlocks_t2 = CodeHeapStatArray[ix].nBlocks_t2;
322 nBlocks_alive = CodeHeapStatArray[ix].nBlocks_alive;
323 nBlocks_dead = CodeHeapStatArray[ix].nBlocks_dead;
324 nBlocks_inconstr = CodeHeapStatArray[ix].nBlocks_inconstr;
325 nBlocks_unloaded = CodeHeapStatArray[ix].nBlocks_unloaded;
326 nBlocks_stub = CodeHeapStatArray[ix].nBlocks_stub;
327 FreeArray = CodeHeapStatArray[ix].FreeArray;
328 alloc_freeBlocks = CodeHeapStatArray[ix].alloc_freeBlocks;
329 TopSizeArray = CodeHeapStatArray[ix].TopSizeArray;
330 alloc_topSizeBlocks = CodeHeapStatArray[ix].alloc_topSizeBlocks;
331 used_topSizeBlocks = CodeHeapStatArray[ix].used_topSizeBlocks;
332 SizeDistributionArray = CodeHeapStatArray[ix].SizeDistributionArray;
333 avgTemp = CodeHeapStatArray[ix].avgTemp;
334 maxTemp = CodeHeapStatArray[ix].maxTemp;
335 minTemp = CodeHeapStatArray[ix].minTemp;
336 } else {
337 StatArray = NULL;
338 seg_size = 0;
339 log2_seg_size = 0;
340 alloc_granules = 0;
341 granule_size = 0;
342 segment_granules = false;
343 nBlocks_t1 = 0;
344 nBlocks_t2 = 0;
345 nBlocks_alive = 0;
346 nBlocks_dead = 0;
347 nBlocks_inconstr = 0;
348 nBlocks_unloaded = 0;
349 nBlocks_stub = 0;
350 FreeArray = NULL;
351 alloc_freeBlocks = 0;
352 TopSizeArray = NULL;
353 alloc_topSizeBlocks = 0;
354 used_topSizeBlocks = 0;
355 SizeDistributionArray = NULL;
356 avgTemp = 0;
357 maxTemp = 0;
358 minTemp = 0;
359 }
360 }
361
362 void CodeHeapState::set_HeapStatGlobals(outputStream* out, const char* heapName) {
363 unsigned int ix = findHeapIndex(out, heapName);
364 if (ix < maxHeaps) {
365 CodeHeapStatArray[ix].StatArray = StatArray;
366 CodeHeapStatArray[ix].segment_size = seg_size;
367 CodeHeapStatArray[ix].alloc_granules = alloc_granules;
368 CodeHeapStatArray[ix].granule_size = granule_size;
369 CodeHeapStatArray[ix].segment_granules = segment_granules;
370 CodeHeapStatArray[ix].nBlocks_t1 = nBlocks_t1;
371 CodeHeapStatArray[ix].nBlocks_t2 = nBlocks_t2;
372 CodeHeapStatArray[ix].nBlocks_alive = nBlocks_alive;
373 CodeHeapStatArray[ix].nBlocks_dead = nBlocks_dead;
374 CodeHeapStatArray[ix].nBlocks_inconstr = nBlocks_inconstr;
375 CodeHeapStatArray[ix].nBlocks_unloaded = nBlocks_unloaded;
376 CodeHeapStatArray[ix].nBlocks_stub = nBlocks_stub;
377 CodeHeapStatArray[ix].FreeArray = FreeArray;
378 CodeHeapStatArray[ix].alloc_freeBlocks = alloc_freeBlocks;
379 CodeHeapStatArray[ix].TopSizeArray = TopSizeArray;
380 CodeHeapStatArray[ix].alloc_topSizeBlocks = alloc_topSizeBlocks;
381 CodeHeapStatArray[ix].used_topSizeBlocks = used_topSizeBlocks;
382 CodeHeapStatArray[ix].SizeDistributionArray = SizeDistributionArray;
383 CodeHeapStatArray[ix].avgTemp = avgTemp;
384 CodeHeapStatArray[ix].maxTemp = maxTemp;
385 CodeHeapStatArray[ix].minTemp = minTemp;
386 }
387 }
388
389 //---< get a new statistics array >---
390 void CodeHeapState::prepare_StatArray(outputStream* out, size_t nElem, size_t granularity, const char* heapName) {
391 if (StatArray == NULL) {
392 StatArray = new StatElement[nElem];
393 //---< reset some counts >---
394 alloc_granules = nElem;
481
482 void CodeHeapState::discard_StatArray(outputStream* out) {
483 if (StatArray != NULL) {
484 delete StatArray;
485 StatArray = NULL;
486 alloc_granules = 0;
487 granule_size = 0;
488 }
489 }
490
491 void CodeHeapState::discard_FreeArray(outputStream* out) {
492 if (FreeArray != NULL) {
493 delete[] FreeArray;
494 FreeArray = NULL;
495 alloc_freeBlocks = 0;
496 }
497 }
498
499 void CodeHeapState::discard_TopSizeArray(outputStream* out) {
500 if (TopSizeArray != NULL) {
501 delete[] TopSizeArray;
502 TopSizeArray = NULL;
503 alloc_topSizeBlocks = 0;
504 used_topSizeBlocks = 0;
505 }
506 }
507
508 void CodeHeapState::discard_SizeDistArray(outputStream* out) {
509 if (SizeDistributionArray != NULL) {
510 delete[] SizeDistributionArray;
511 SizeDistributionArray = NULL;
512 }
513 }
514
515 // Discard all allocated internal data structures.
516 // This should be done after an analysis session is completed.
517 void CodeHeapState::discard(outputStream* out, CodeHeap* heap) {
518 if (!initialization_complete) {
519 return;
520 }
574 BUFFEREDSTREAM_FLUSH("")
575 }
576 get_HeapStatGlobals(out, heapName);
577
578
579 // Since we are (and must be) analyzing the CodeHeap contents under the CodeCache_lock,
580 // all heap information is "constant" and can be safely extracted/calculated before we
581 // enter the while() loop. Actually, the loop will only be iterated once.
582 char* low_bound = heap->low_boundary();
583 size_t size = heap->capacity();
584 size_t res_size = heap->max_capacity();
585 seg_size = heap->segment_size();
586 log2_seg_size = seg_size == 0 ? 0 : exact_log2(seg_size); // This is a global static value.
587
588 if (seg_size == 0) {
589 printBox(ast, '-', "Heap not fully initialized yet, segment size is zero for segment ", heapName);
590 BUFFEREDSTREAM_FLUSH("")
591 return;
592 }
593
594 if (!CodeCache_lock->owned_by_self()) {
595 printBox(ast, '-', "aggregate function called without holding the CodeCache_lock for ", heapName);
596 BUFFEREDSTREAM_FLUSH("")
597 return;
598 }
599
600 // Calculate granularity of analysis (and output).
601 // The CodeHeap is managed (allocated) in segments (units) of CodeCacheSegmentSize.
602 // The CodeHeap can become fairly large, in particular in productive real-life systems.
603 //
604 // It is often neither feasible nor desirable to aggregate the data with the highest possible
605 // level of detail, i.e. inspecting and printing each segment on its own.
606 //
607 // The granularity parameter allows to specify the level of detail available in the analysis.
608 // It must be a positive multiple of the segment size and should be selected such that enough
609 // detail is provided while, at the same time, the printed output does not explode.
610 //
611 // By manipulating the granularity value, we enforce that at least min_granules units
612 // of analysis are available. We also enforce an upper limit of max_granules units to
613 // keep the amount of allocated storage in check.
614 //
615 // Finally, we adjust the granularity such that each granule covers at most 64k-1 segments.
642 " Subsequent print functions create their output based on this snapshot.\n"
643 " The CodeHeap is a living thing, and every effort has been made for the\n"
644 " collected data to be consistent. Only the method names and signatures\n"
645 " are retrieved at print time. That may lead to rare cases where the\n"
646 " name of a method is no longer available, e.g. because it was unloaded.\n");
647 ast->print_cr(" CodeHeap committed size " SIZE_FORMAT "K (" SIZE_FORMAT "M), reserved size " SIZE_FORMAT "K (" SIZE_FORMAT "M), %d%% occupied.",
648 size/(size_t)K, size/(size_t)M, res_size/(size_t)K, res_size/(size_t)M, (unsigned int)(100.0*size/res_size));
649 ast->print_cr(" CodeHeap allocation segment size is " SIZE_FORMAT " bytes. This is the smallest possible granularity.", seg_size);
650 ast->print_cr(" CodeHeap (committed part) is mapped to " SIZE_FORMAT " granules of size " SIZE_FORMAT " bytes.", granules, granularity);
651 ast->print_cr(" Each granule takes " SIZE_FORMAT " bytes of C heap, that is " SIZE_FORMAT "K in total for statistics data.", sizeof(StatElement), (sizeof(StatElement)*granules)/(size_t)K);
652 ast->print_cr(" The number of granules is limited to %dk, requiring a granules size of at least %d bytes for a 1GB heap.", (unsigned int)(max_granules/K), (unsigned int)(G/max_granules));
653 BUFFEREDSTREAM_FLUSH("\n")
654
655
656 while (!done) {
657 //---< reset counters with every aggregation >---
658 nBlocks_t1 = 0;
659 nBlocks_t2 = 0;
660 nBlocks_alive = 0;
661 nBlocks_dead = 0;
662 nBlocks_inconstr = 0;
663 nBlocks_unloaded = 0;
664 nBlocks_stub = 0;
665
666 nBlocks_free = 0;
667 nBlocks_used = 0;
668 nBlocks_zomb = 0;
669 nBlocks_disconn = 0;
670 nBlocks_notentr = 0;
671
672 //---< discard old arrays if size does not match >---
673 if (granules != alloc_granules) {
674 discard_StatArray(out);
675 discard_TopSizeArray(out);
676 }
677
678 //---< allocate arrays if they don't yet exist, initialize >---
679 prepare_StatArray(out, granules, granularity, heapName);
680 if (StatArray == NULL) {
681 set_HeapStatGlobals(out, heapName);
682 return;
683 }
684 prepare_TopSizeArray(out, maxTopSizeBlocks, heapName);
685 prepare_SizeDistArray(out, nSizeDistElements, heapName);
686
687 latest_compilation_id = CompileBroker::get_compilation_id();
688 unsigned int highest_compilation_id = 0;
689 size_t usedSpace = 0;
690 size_t t1Space = 0;
691 size_t t2Space = 0;
692 size_t aliveSpace = 0;
693 size_t disconnSpace = 0;
694 size_t notentrSpace = 0;
695 size_t deadSpace = 0;
696 size_t inconstrSpace = 0;
697 size_t unloadedSpace = 0;
698 size_t stubSpace = 0;
699 size_t freeSpace = 0;
700 size_t maxFreeSize = 0;
701 HeapBlock* maxFreeBlock = NULL;
702 bool insane = false;
703
704 int64_t hotnessAccumulator = 0;
705 unsigned int n_methods = 0;
706 avgTemp = 0;
707 minTemp = (int)(res_size > M ? (res_size/M)*2 : 1);
708 maxTemp = -minTemp;
709
710 for (HeapBlock *h = heap->first_block(); h != NULL && !insane; h = heap->next_block(h)) {
711 unsigned int hb_len = (unsigned int)h->length(); // despite being size_t, length can never overflow an unsigned int.
712 size_t hb_bytelen = ((size_t)hb_len)<<log2_seg_size;
713 unsigned int ix_beg = (unsigned int)(((char*)h-low_bound)/granule_size);
714 unsigned int ix_end = (unsigned int)(((char*)h-low_bound+(hb_bytelen-1))/granule_size);
715 unsigned int compile_id = 0;
716 CompLevel comp_lvl = CompLevel_none;
738 if (ix_beg > ix_end) {
739 insane = true; ast->print_cr("Sanity check: end index (%d) lower than begin index (%d)", ix_end, ix_beg);
740 }
741 if (insane) {
742 BUFFEREDSTREAM_FLUSH("")
743 continue;
744 }
745
746 if (h->free()) {
747 nBlocks_free++;
748 freeSpace += hb_bytelen;
749 if (hb_bytelen > maxFreeSize) {
750 maxFreeSize = hb_bytelen;
751 maxFreeBlock = h;
752 }
753 } else {
754 update_SizeDistArray(out, hb_len);
755 nBlocks_used++;
756 usedSpace += hb_bytelen;
757 CodeBlob* cb = (CodeBlob*)heap->find_start(h);
758 if (cb != NULL) {
759 cbType = get_cbType(cb);
760 if (cb->is_nmethod()) {
761 compile_id = ((nmethod*)cb)->compile_id();
762 comp_lvl = (CompLevel)((nmethod*)cb)->comp_level();
763 if (((nmethod*)cb)->is_compiled_by_c1()) {
764 cType = c1;
765 }
766 if (((nmethod*)cb)->is_compiled_by_c2()) {
767 cType = c2;
768 }
769 if (((nmethod*)cb)->is_compiled_by_jvmci()) {
770 cType = jvmci;
771 }
772 switch (cbType) {
773 case nMethod_inuse: { // only for executable methods!!!
774 // space for these cbs is accounted for later.
775 int temperature = ((nmethod*)cb)->hotness_counter();
776 hotnessAccumulator += temperature;
777 n_methods++;
778 maxTemp = (temperature > maxTemp) ? temperature : maxTemp;
779 minTemp = (temperature < minTemp) ? temperature : minTemp;
780 break;
781 }
782 case nMethod_notused:
783 nBlocks_alive++;
784 nBlocks_disconn++;
785 aliveSpace += hb_bytelen;
786 disconnSpace += hb_bytelen;
787 break;
788 case nMethod_notentrant: // equivalent to nMethod_alive
789 nBlocks_alive++;
790 nBlocks_notentr++;
791 aliveSpace += hb_bytelen;
792 notentrSpace += hb_bytelen;
793 break;
794 case nMethod_unloaded:
795 nBlocks_unloaded++;
796 unloadedSpace += hb_bytelen;
797 break;
798 case nMethod_dead:
799 nBlocks_dead++;
800 deadSpace += hb_bytelen;
801 break;
802 case nMethod_inconstruction:
803 nBlocks_inconstr++;
804 inconstrSpace += hb_bytelen;
805 break;
806 default:
807 break;
808 }
809 }
810
811 //------------------------------------------
812 //---< register block in TopSizeArray >---
813 //------------------------------------------
814 if (alloc_topSizeBlocks > 0) {
815 if (used_topSizeBlocks == 0) {
816 TopSizeArray[0].start = h;
817 TopSizeArray[0].len = hb_len;
818 TopSizeArray[0].index = tsbStopper;
819 TopSizeArray[0].compiler = cType;
820 TopSizeArray[0].level = comp_lvl;
821 TopSizeArray[0].type = cbType;
822 currMax = hb_len;
823 currMin = hb_len;
824 currMin_ix = 0;
825 used_topSizeBlocks++;
826 // This check roughly cuts 5000 iterations (JVM98, mixed, dbg, termination stats):
827 } else if ((used_topSizeBlocks < alloc_topSizeBlocks) && (hb_len < currMin)) {
828 //---< all blocks in list are larger, but there is room left in array >---
829 TopSizeArray[currMin_ix].index = used_topSizeBlocks;
830 TopSizeArray[used_topSizeBlocks].start = h;
831 TopSizeArray[used_topSizeBlocks].len = hb_len;
832 TopSizeArray[used_topSizeBlocks].index = tsbStopper;
833 TopSizeArray[used_topSizeBlocks].compiler = cType;
834 TopSizeArray[used_topSizeBlocks].level = comp_lvl;
835 TopSizeArray[used_topSizeBlocks].type = cbType;
836 currMin = hb_len;
837 currMin_ix = used_topSizeBlocks;
838 used_topSizeBlocks++;
839 } else {
840 // This check cuts total_iterations by a factor of 6 (JVM98, mixed, dbg, termination stats):
841 // We don't need to search the list if we know beforehand that the current block size is
842 // smaller than the currently recorded minimum and there is no free entry left in the list.
843 if (!((used_topSizeBlocks == alloc_topSizeBlocks) && (hb_len <= currMin))) {
844 if (currMax < hb_len) {
845 currMax = hb_len;
846 }
847 unsigned int i;
848 unsigned int prev_i = tsbStopper;
849 unsigned int limit_i = 0;
850 for (i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
851 if (limit_i++ >= alloc_topSizeBlocks) {
852 insane = true; break; // emergency exit
853 }
854 if (i >= used_topSizeBlocks) {
855 insane = true; break; // emergency exit
856 }
857 total_iterations++;
858 if (TopSizeArray[i].len < hb_len) {
859 //---< We want to insert here, element <i> is smaller than the current one >---
860 if (used_topSizeBlocks < alloc_topSizeBlocks) { // still room for a new entry to insert
861 // old entry gets moved to the next free element of the array.
862 // That's necessary to keep the entry for the largest block at index 0.
863 // This move might cause the current minimum to be moved to another place
864 if (i == currMin_ix) {
865 assert(TopSizeArray[i].len == currMin, "sort error");
866 currMin_ix = used_topSizeBlocks;
867 }
868 memcpy((void*)&TopSizeArray[used_topSizeBlocks], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
869 TopSizeArray[i].start = h;
870 TopSizeArray[i].len = hb_len;
871 TopSizeArray[i].index = used_topSizeBlocks;
872 TopSizeArray[i].compiler = cType;
873 TopSizeArray[i].level = comp_lvl;
874 TopSizeArray[i].type = cbType;
875 used_topSizeBlocks++;
876 } else { // no room for new entries, current block replaces entry for smallest block
877 //---< Find last entry (entry for smallest remembered block) >---
878 unsigned int j = i;
879 unsigned int prev_j = tsbStopper;
880 unsigned int limit_j = 0;
881 while (TopSizeArray[j].index != tsbStopper) {
882 if (limit_j++ >= alloc_topSizeBlocks) {
883 insane = true; break; // emergency exit
884 }
885 if (j >= used_topSizeBlocks) {
886 insane = true; break; // emergency exit
887 }
888 total_iterations++;
889 prev_j = j;
890 j = TopSizeArray[j].index;
891 }
892 if (!insane) {
893 if (prev_j == tsbStopper) {
894 //---< Above while loop did not iterate, we already are the min entry >---
895 //---< We have to just replace the smallest entry >---
896 currMin = hb_len;
897 currMin_ix = j;
898 TopSizeArray[j].start = h;
899 TopSizeArray[j].len = hb_len;
900 TopSizeArray[j].index = tsbStopper; // already set!!
901 TopSizeArray[j].compiler = cType;
902 TopSizeArray[j].level = comp_lvl;
903 TopSizeArray[j].type = cbType;
904 } else {
905 //---< second-smallest entry is now smallest >---
906 TopSizeArray[prev_j].index = tsbStopper;
907 currMin = TopSizeArray[prev_j].len;
908 currMin_ix = prev_j;
909 //---< smallest entry gets overwritten >---
910 memcpy((void*)&TopSizeArray[j], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
911 TopSizeArray[i].start = h;
912 TopSizeArray[i].len = hb_len;
913 TopSizeArray[i].index = j;
914 TopSizeArray[i].compiler = cType;
915 TopSizeArray[i].level = comp_lvl;
916 TopSizeArray[i].type = cbType;
917 }
918 } // insane
919 }
920 break;
921 }
922 prev_i = i;
923 }
924 if (insane) {
925 // Note: regular analysis could probably continue by resetting "insane" flag.
926 out->print_cr("Possible loop in TopSizeBlocks list detected. Analysis aborted.");
927 discard_TopSizeArray(out);
928 }
929 }
930 }
931 }
932 //----------------------------------------------
933 //---< END register block in TopSizeArray >---
934 //----------------------------------------------
935 } else {
936 nBlocks_zomb++;
937 }
938
939 if (ix_beg == ix_end) {
940 StatArray[ix_beg].type = cbType;
941 switch (cbType) {
942 case nMethod_inuse:
943 highest_compilation_id = (highest_compilation_id >= compile_id) ? highest_compilation_id : compile_id;
944 if (comp_lvl < CompLevel_full_optimization) {
945 nBlocks_t1++;
946 t1Space += hb_bytelen;
947 StatArray[ix_beg].t1_count++;
948 StatArray[ix_beg].t1_space += (unsigned short)hb_len;
949 StatArray[ix_beg].t1_age = StatArray[ix_beg].t1_age < compile_id ? compile_id : StatArray[ix_beg].t1_age;
950 } else {
951 nBlocks_t2++;
952 t2Space += hb_bytelen;
953 StatArray[ix_beg].t2_count++;
954 StatArray[ix_beg].t2_space += (unsigned short)hb_len;
955 StatArray[ix_beg].t2_age = StatArray[ix_beg].t2_age < compile_id ? compile_id : StatArray[ix_beg].t2_age;
956 }
957 StatArray[ix_beg].level = comp_lvl;
958 StatArray[ix_beg].compiler = cType;
959 break;
960 case nMethod_inconstruction: // let's count "in construction" nmethods here.
961 case nMethod_alive:
962 StatArray[ix_beg].tx_count++;
963 StatArray[ix_beg].tx_space += (unsigned short)hb_len;
964 StatArray[ix_beg].tx_age = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
965 StatArray[ix_beg].level = comp_lvl;
966 StatArray[ix_beg].compiler = cType;
967 break;
968 case nMethod_dead:
969 case nMethod_unloaded:
970 StatArray[ix_beg].dead_count++;
971 StatArray[ix_beg].dead_space += (unsigned short)hb_len;
972 break;
973 default:
974 // must be a stub, if it's not a dead or alive nMethod
975 nBlocks_stub++;
976 stubSpace += hb_bytelen;
977 StatArray[ix_beg].stub_count++;
978 StatArray[ix_beg].stub_space += (unsigned short)hb_len;
979 break;
980 }
997
998 StatArray[ix_end].t1_count++;
999 StatArray[ix_end].t1_space += (unsigned short)end_space;
1000 StatArray[ix_end].t1_age = StatArray[ix_end].t1_age < compile_id ? compile_id : StatArray[ix_end].t1_age;
1001 } else {
1002 nBlocks_t2++;
1003 t2Space += hb_bytelen;
1004 StatArray[ix_beg].t2_count++;
1005 StatArray[ix_beg].t2_space += (unsigned short)beg_space;
1006 StatArray[ix_beg].t2_age = StatArray[ix_beg].t2_age < compile_id ? compile_id : StatArray[ix_beg].t2_age;
1007
1008 StatArray[ix_end].t2_count++;
1009 StatArray[ix_end].t2_space += (unsigned short)end_space;
1010 StatArray[ix_end].t2_age = StatArray[ix_end].t2_age < compile_id ? compile_id : StatArray[ix_end].t2_age;
1011 }
1012 StatArray[ix_beg].level = comp_lvl;
1013 StatArray[ix_beg].compiler = cType;
1014 StatArray[ix_end].level = comp_lvl;
1015 StatArray[ix_end].compiler = cType;
1016 break;
1017 case nMethod_inconstruction: // let's count "in construction" nmethods here.
1018 case nMethod_alive:
1019 StatArray[ix_beg].tx_count++;
1020 StatArray[ix_beg].tx_space += (unsigned short)beg_space;
1021 StatArray[ix_beg].tx_age = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
1022
1023 StatArray[ix_end].tx_count++;
1024 StatArray[ix_end].tx_space += (unsigned short)end_space;
1025 StatArray[ix_end].tx_age = StatArray[ix_end].tx_age < compile_id ? compile_id : StatArray[ix_end].tx_age;
1026
1027 StatArray[ix_beg].level = comp_lvl;
1028 StatArray[ix_beg].compiler = cType;
1029 StatArray[ix_end].level = comp_lvl;
1030 StatArray[ix_end].compiler = cType;
1031 break;
1032 case nMethod_dead:
1033 case nMethod_unloaded:
1034 StatArray[ix_beg].dead_count++;
1035 StatArray[ix_beg].dead_space += (unsigned short)beg_space;
1036 StatArray[ix_end].dead_count++;
1037 StatArray[ix_end].dead_space += (unsigned short)end_space;
1045 StatArray[ix_end].stub_count++;
1046 StatArray[ix_end].stub_space += (unsigned short)end_space;
1047 break;
1048 }
1049 for (unsigned int ix = ix_beg+1; ix < ix_end; ix++) {
1050 StatArray[ix].type = cbType;
1051 switch (cbType) {
1052 case nMethod_inuse:
1053 if (comp_lvl < CompLevel_full_optimization) {
1054 StatArray[ix].t1_count++;
1055 StatArray[ix].t1_space += (unsigned short)(granule_size>>log2_seg_size);
1056 StatArray[ix].t1_age = StatArray[ix].t1_age < compile_id ? compile_id : StatArray[ix].t1_age;
1057 } else {
1058 StatArray[ix].t2_count++;
1059 StatArray[ix].t2_space += (unsigned short)(granule_size>>log2_seg_size);
1060 StatArray[ix].t2_age = StatArray[ix].t2_age < compile_id ? compile_id : StatArray[ix].t2_age;
1061 }
1062 StatArray[ix].level = comp_lvl;
1063 StatArray[ix].compiler = cType;
1064 break;
1065 case nMethod_inconstruction: // let's count "in construction" nmethods here.
1066 case nMethod_alive:
1067 StatArray[ix].tx_count++;
1068 StatArray[ix].tx_space += (unsigned short)(granule_size>>log2_seg_size);
1069 StatArray[ix].tx_age = StatArray[ix].tx_age < compile_id ? compile_id : StatArray[ix].tx_age;
1070 StatArray[ix].level = comp_lvl;
1071 StatArray[ix].compiler = cType;
1072 break;
1073 case nMethod_dead:
1074 case nMethod_unloaded:
1075 StatArray[ix].dead_count++;
1076 StatArray[ix].dead_space += (unsigned short)(granule_size>>log2_seg_size);
1077 break;
1078 default:
1079 // must be a stub, if it's not a dead or alive nMethod
1080 StatArray[ix].stub_count++;
1081 StatArray[ix].stub_space += (unsigned short)(granule_size>>log2_seg_size);
1082 break;
1083 }
1084 }
1085 }
1086 }
1087 }
1088 done = true;
1089
1090 if (!insane) {
1091 // There is a risk for this block (because it contains many print statements) to get
1092 // interspersed with print data from other threads. We take this risk intentionally.
1093 // Getting stalled waiting for tty_lock while holding the CodeCache_lock is not desirable.
1094 printBox(ast, '-', "Global CodeHeap statistics for segment ", heapName);
1095 ast->print_cr("freeSpace = " SIZE_FORMAT_W(8) "k, nBlocks_free = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", freeSpace/(size_t)K, nBlocks_free, (100.0*freeSpace)/size, (100.0*freeSpace)/res_size);
1096 ast->print_cr("usedSpace = " SIZE_FORMAT_W(8) "k, nBlocks_used = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", usedSpace/(size_t)K, nBlocks_used, (100.0*usedSpace)/size, (100.0*usedSpace)/res_size);
1097 ast->print_cr(" Tier1 Space = " SIZE_FORMAT_W(8) "k, nBlocks_t1 = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", t1Space/(size_t)K, nBlocks_t1, (100.0*t1Space)/size, (100.0*t1Space)/res_size);
1098 ast->print_cr(" Tier2 Space = " SIZE_FORMAT_W(8) "k, nBlocks_t2 = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", t2Space/(size_t)K, nBlocks_t2, (100.0*t2Space)/size, (100.0*t2Space)/res_size);
1099 ast->print_cr(" Alive Space = " SIZE_FORMAT_W(8) "k, nBlocks_alive = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", aliveSpace/(size_t)K, nBlocks_alive, (100.0*aliveSpace)/size, (100.0*aliveSpace)/res_size);
1100 ast->print_cr(" disconnected = " SIZE_FORMAT_W(8) "k, nBlocks_disconn = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", disconnSpace/(size_t)K, nBlocks_disconn, (100.0*disconnSpace)/size, (100.0*disconnSpace)/res_size);
1101 ast->print_cr(" not entrant = " SIZE_FORMAT_W(8) "k, nBlocks_notentr = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", notentrSpace/(size_t)K, nBlocks_notentr, (100.0*notentrSpace)/size, (100.0*notentrSpace)/res_size);
1102 ast->print_cr(" inconstrSpace = " SIZE_FORMAT_W(8) "k, nBlocks_inconstr = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", inconstrSpace/(size_t)K, nBlocks_inconstr, (100.0*inconstrSpace)/size, (100.0*inconstrSpace)/res_size);
1103 ast->print_cr(" unloadedSpace = " SIZE_FORMAT_W(8) "k, nBlocks_unloaded = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", unloadedSpace/(size_t)K, nBlocks_unloaded, (100.0*unloadedSpace)/size, (100.0*unloadedSpace)/res_size);
1104 ast->print_cr(" deadSpace = " SIZE_FORMAT_W(8) "k, nBlocks_dead = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", deadSpace/(size_t)K, nBlocks_dead, (100.0*deadSpace)/size, (100.0*deadSpace)/res_size);
1105 ast->print_cr(" stubSpace = " SIZE_FORMAT_W(8) "k, nBlocks_stub = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", stubSpace/(size_t)K, nBlocks_stub, (100.0*stubSpace)/size, (100.0*stubSpace)/res_size);
1106 ast->print_cr("ZombieBlocks = %8d. These are HeapBlocks which could not be identified as CodeBlobs.", nBlocks_zomb);
1107 ast->cr();
1108 ast->print_cr("Segment start = " INTPTR_FORMAT ", used space = " SIZE_FORMAT_W(8)"k", p2i(low_bound), size/K);
1109 ast->print_cr("Segment end (used) = " INTPTR_FORMAT ", remaining space = " SIZE_FORMAT_W(8)"k", p2i(low_bound) + size, (res_size - size)/K);
1110 ast->print_cr("Segment end (reserved) = " INTPTR_FORMAT ", reserved space = " SIZE_FORMAT_W(8)"k", p2i(low_bound) + res_size, res_size/K);
1111 ast->cr();
1112 ast->print_cr("latest allocated compilation id = %d", latest_compilation_id);
1113 ast->print_cr("highest observed compilation id = %d", highest_compilation_id);
1114 ast->print_cr("Building TopSizeList iterations = %ld", total_iterations);
1115 ast->cr();
1116
1117 int reset_val = NMethodSweeper::hotness_counter_reset_val();
1118 double reverse_free_ratio = (res_size > size) ? (double)res_size/(double)(res_size-size) : (double)res_size;
1119 printBox(ast, '-', "Method hotness information at time of this analysis", NULL);
1120 ast->print_cr("Highest possible method temperature: %12d", reset_val);
1121 ast->print_cr("Threshold for method to be considered 'cold': %12.3f", -reset_val + reverse_free_ratio * NmethodSweepActivity);
1122 if (n_methods > 0) {
1260 ast->print_cr("Free block count mismatch could not be resolved.");
1261 ast->print_cr("Try to run \"aggregate\" function to update counters");
1262 }
1263 BUFFEREDSTREAM_FLUSH("")
1264
1265 //---< discard old array and update global values >---
1266 discard_FreeArray(out);
1267 set_HeapStatGlobals(out, heapName);
1268 return;
1269 }
1270
1271 //---< calculate and fill remaining fields >---
1272 if (FreeArray != NULL) {
1273 // This loop is intentionally printing directly to "out".
1274 // It should not print anything, anyway.
1275 for (unsigned int ix = 0; ix < alloc_freeBlocks-1; ix++) {
1276 size_t lenSum = 0;
1277 FreeArray[ix].gap = (unsigned int)((address)FreeArray[ix+1].start - ((address)FreeArray[ix].start + FreeArray[ix].len));
1278 for (HeapBlock *h = heap->next_block(FreeArray[ix].start); (h != NULL) && (h != FreeArray[ix+1].start); h = heap->next_block(h)) {
1279 CodeBlob *cb = (CodeBlob*)(heap->find_start(h));
1280 if ((cb != NULL) && !cb->is_nmethod()) {
1281 FreeArray[ix].stubs_in_gap = true;
1282 }
1283 FreeArray[ix].n_gapBlocks++;
1284 lenSum += h->length()<<log2_seg_size;
1285 if (((address)h < ((address)FreeArray[ix].start+FreeArray[ix].len)) || (h >= FreeArray[ix+1].start)) {
1286 out->print_cr("unsorted occupied CodeHeap block found @ %p, gap interval [%p, %p)", h, (address)FreeArray[ix].start+FreeArray[ix].len, FreeArray[ix+1].start);
1287 }
1288 }
1289 if (lenSum != FreeArray[ix].gap) {
1290 out->print_cr("Length mismatch for gap between FreeBlk[%d] and FreeBlk[%d]. Calculated: %d, accumulated: %d.", ix, ix+1, FreeArray[ix].gap, (unsigned int)lenSum);
1291 }
1292 }
1293 }
1294 set_HeapStatGlobals(out, heapName);
1295
1296 printBox(ast, '=', "C O D E H E A P A N A L Y S I S C O M P L E T E for segment ", heapName);
1297 BUFFEREDSTREAM_FLUSH("\n")
1298 }
1299
1300
1312 BUFFEREDSTREAM_DECL(ast, out)
1313
1314 {
1315 printBox(ast, '=', "U S E D S P A C E S T A T I S T I C S for ", heapName);
1316 ast->print_cr("Note: The Top%d list of the largest used blocks associates method names\n"
1317 " and other identifying information with the block size data.\n"
1318 "\n"
1319 " Method names are dynamically retrieved from the code cache at print time.\n"
1320 " Due to the living nature of the code cache and because the CodeCache_lock\n"
1321 " is not continuously held, the displayed name might be wrong or no name\n"
1322 " might be found at all. The likelihood for that to happen increases\n"
1323 " over time passed between analysis and print step.\n", used_topSizeBlocks);
1324 BUFFEREDSTREAM_FLUSH_LOCKED("\n")
1325 }
1326
1327 //----------------------------
1328 //-- Print Top Used Blocks --
1329 //----------------------------
1330 {
1331 char* low_bound = heap->low_boundary();
1332 bool have_CodeCache_lock = CodeCache_lock->owned_by_self();
1333
1334 printBox(ast, '-', "Largest Used Blocks in ", heapName);
1335 print_blobType_legend(ast);
1336
1337 ast->fill_to(51);
1338 ast->print("%4s", "blob");
1339 ast->fill_to(56);
1340 ast->print("%9s", "compiler");
1341 ast->fill_to(66);
1342 ast->print_cr("%6s", "method");
1343 ast->print_cr("%18s %13s %17s %4s %9s %5s %s", "Addr(module) ", "offset", "size", "type", " type lvl", " temp", "Name");
1344 BUFFEREDSTREAM_FLUSH_LOCKED("")
1345
1346 //---< print Top Ten Used Blocks >---
1347 if (used_topSizeBlocks > 0) {
1348 unsigned int printed_topSizeBlocks = 0;
1349 for (unsigned int i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
1350 printed_topSizeBlocks++;
1351 nmethod* nm = NULL;
1352 const char* blob_name = "unnamed blob or blob name unavailable";
1353 // heap->find_start() is safe. Only works on _segmap.
1354 // Returns NULL or void*. Returned CodeBlob may be uninitialized.
1355 HeapBlock* heapBlock = TopSizeArray[i].start;
1356 CodeBlob* this_blob = (CodeBlob*)(heap->find_start(heapBlock));
1357 bool blob_is_safe = blob_access_is_safe(this_blob, NULL);
1358 if (blob_is_safe) {
1359 //---< access these fields only if we own the CodeCache_lock >---
1360 if (have_CodeCache_lock) {
1361 blob_name = this_blob->name();
1362 nm = this_blob->as_nmethod_or_null();
1363 }
1364 //---< blob address >---
1365 ast->print(INTPTR_FORMAT, p2i(this_blob));
1366 ast->fill_to(19);
1367 //---< blob offset from CodeHeap begin >---
1368 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
1369 ast->fill_to(33);
1370 } else {
1371 //---< block address >---
1372 ast->print(INTPTR_FORMAT, p2i(TopSizeArray[i].start));
1373 ast->fill_to(19);
1374 //---< block offset from CodeHeap begin >---
1375 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)TopSizeArray[i].start-low_bound));
1376 ast->fill_to(33);
1377 }
1378
1379 //---< print size, name, and signature (for nMethods) >---
1380 // access nmethod and Method fields only if we own the CodeCache_lock.
1381 // This fact is implicitly transported via nm != NULL.
1382 if (CompiledMethod::nmethod_access_is_safe(nm)) {
1383 ResourceMark rm;
1384 Method* method = nm->method();
1385 if (nm->is_in_use()) {
1386 blob_name = method->name_and_sig_as_C_string();
1387 }
1388 if (nm->is_not_entrant()) {
1389 blob_name = method->name_and_sig_as_C_string();
1390 }
1391 //---< nMethod size in hex >---
1392 unsigned int total_size = nm->total_size();
1393 ast->print(PTR32_FORMAT, total_size);
1394 ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
1395 ast->fill_to(51);
1396 ast->print(" %c", blobTypeChar[TopSizeArray[i].type]);
1397 //---< compiler information >---
1398 ast->fill_to(56);
1399 ast->print("%5s %3d", compTypeName[TopSizeArray[i].compiler], TopSizeArray[i].level);
1400 //---< method temperature >---
1401 ast->fill_to(67);
1402 ast->print("%5d", nm->hotness_counter());
1403 //---< name and signature >---
1404 ast->fill_to(67+6);
1405 if (nm->is_not_installed()) {
1406 ast->print(" not (yet) installed method ");
1407 }
1408 if (nm->is_zombie()) {
1409 ast->print(" zombie method ");
1410 }
1411 ast->print("%s", blob_name);
1412 } else {
1413 //---< block size in hex >---
1414 ast->print(PTR32_FORMAT, (unsigned int)(TopSizeArray[i].len<<log2_seg_size));
1415 ast->print("(" SIZE_FORMAT_W(4) "K)", (TopSizeArray[i].len<<log2_seg_size)/K);
1416 //---< no compiler information >---
1417 ast->fill_to(56);
1418 //---< name and signature >---
1419 ast->fill_to(67+6);
1420 ast->print("%s", blob_name);
1421 }
1422 ast->cr();
1423 BUFFEREDSTREAM_FLUSH_AUTO("")
1424 }
1425 if (used_topSizeBlocks != printed_topSizeBlocks) {
1426 ast->print_cr("used blocks: %d, printed blocks: %d", used_topSizeBlocks, printed_topSizeBlocks);
1427 for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
1428 ast->print_cr(" TopSizeArray[%d].index = %d, len = %d", i, TopSizeArray[i].index, TopSizeArray[i].len);
1429 BUFFEREDSTREAM_FLUSH_AUTO("")
1430 }
1431 }
1432 BUFFEREDSTREAM_FLUSH("\n\n")
1433 }
1434 }
1435
1436 //-----------------------------
1437 //-- Print Usage Histogram --
1438 //-----------------------------
1439
1440 if (SizeDistributionArray != NULL) {
2181 }
2182
2183
2184 void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
2185 if (!initialization_complete) {
2186 return;
2187 }
2188
2189 const char* heapName = get_heapName(heap);
2190 get_HeapStatGlobals(out, heapName);
2191
2192 if ((StatArray == NULL) || (alloc_granules == 0)) {
2193 return;
2194 }
2195 BUFFEREDSTREAM_DECL(ast, out)
2196
2197 unsigned int granules_per_line = 128;
2198 char* low_bound = heap->low_boundary();
2199 CodeBlob* last_blob = NULL;
2200 bool name_in_addr_range = true;
2201 bool have_CodeCache_lock = CodeCache_lock->owned_by_self();
2202
2203 //---< print at least 128K per block (i.e. between headers) >---
2204 if (granules_per_line*granule_size < 128*K) {
2205 granules_per_line = (unsigned int)((128*K)/granule_size);
2206 }
2207
2208 printBox(ast, '=', "M E T H O D N A M E S for ", heapName);
2209 ast->print_cr(" Method names are dynamically retrieved from the code cache at print time.\n"
2210 " Due to the living nature of the code heap and because the CodeCache_lock\n"
2211 " is not continuously held, the displayed name might be wrong or no name\n"
2212 " might be found at all. The likelihood for that to happen increases\n"
2213 " over time passed between aggregtion and print steps.\n");
2214 BUFFEREDSTREAM_FLUSH_LOCKED("")
2215
2216 for (unsigned int ix = 0; ix < alloc_granules; ix++) {
2217 //---< print a new blob on a new line >---
2218 if (ix%granules_per_line == 0) {
2219 if (!name_in_addr_range) {
2220 ast->print_cr("No methods, blobs, or stubs found in this address range");
2221 }
2222 name_in_addr_range = false;
2223
2224 size_t end_ix = (ix+granules_per_line <= alloc_granules) ? ix+granules_per_line : alloc_granules;
2225 ast->cr();
2226 ast->print_cr("--------------------------------------------------------------------");
2227 ast->print_cr("Address range [" INTPTR_FORMAT "," INTPTR_FORMAT "), " SIZE_FORMAT "k", p2i(low_bound+ix*granule_size), p2i(low_bound + end_ix*granule_size), (end_ix - ix)*granule_size/(size_t)K);
2228 ast->print_cr("--------------------------------------------------------------------");
2229 BUFFEREDSTREAM_FLUSH_AUTO("")
2230 }
2231 // Only check granule if it contains at least one blob.
2232 unsigned int nBlobs = StatArray[ix].t1_count + StatArray[ix].t2_count + StatArray[ix].tx_count +
2233 StatArray[ix].stub_count + StatArray[ix].dead_count;
2234 if (nBlobs > 0 ) {
2235 for (unsigned int is = 0; is < granule_size; is+=(unsigned int)seg_size) {
2236 // heap->find_start() is safe. Only works on _segmap.
2237 // Returns NULL or void*. Returned CodeBlob may be uninitialized.
2238 char* this_seg = low_bound + ix*granule_size + is;
2239 CodeBlob* this_blob = (CodeBlob*)(heap->find_start(this_seg));
2240 bool blob_is_safe = blob_access_is_safe(this_blob, NULL);
2241 // blob could have been flushed, freed, and merged.
2242 // this_blob < last_blob is an indicator for that.
2243 if (blob_is_safe && (this_blob > last_blob)) {
2244 last_blob = this_blob;
2245
2246 //---< get type and name >---
2247 blobType cbType = noType;
2248 if (segment_granules) {
2249 cbType = (blobType)StatArray[ix].type;
2250 } else {
2251 //---< access these fields only if we own the CodeCache_lock >---
2252 if (have_CodeCache_lock) {
2253 cbType = get_cbType(this_blob);
2254 }
2255 }
2256
2257 //---< access these fields only if we own the CodeCache_lock >---
2258 const char* blob_name = "<unavailable>";
2259 nmethod* nm = NULL;
2260 if (have_CodeCache_lock) {
2261 blob_name = this_blob->name();
2262 nm = this_blob->as_nmethod_or_null();
2263 // this_blob->name() could return NULL if no name was given to CTOR. Inlined, maybe invisible on stack
2264 if ((blob_name == NULL) || !os::is_readable_pointer(blob_name)) {
2265 blob_name = "<unavailable>";
2266 }
2267 }
2268
2269 //---< print table header for new print range >---
2270 if (!name_in_addr_range) {
2271 name_in_addr_range = true;
2272 ast->fill_to(51);
2273 ast->print("%9s", "compiler");
2274 ast->fill_to(61);
2275 ast->print_cr("%6s", "method");
2276 ast->print_cr("%18s %13s %17s %9s %5s %18s %s", "Addr(module) ", "offset", "size", " type lvl", " temp", "blobType ", "Name");
2277 BUFFEREDSTREAM_FLUSH_AUTO("")
2278 }
2279
2280 //---< print line prefix (address and offset from CodeHeap start) >---
2281 ast->print(INTPTR_FORMAT, p2i(this_blob));
2282 ast->fill_to(19);
2283 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
2284 ast->fill_to(33);
2285
2286 // access nmethod and Method fields only if we own the CodeCache_lock.
2287 // This fact is implicitly transported via nm != NULL.
2288 if (CompiledMethod::nmethod_access_is_safe(nm)) {
2289 Method* method = nm->method();
2290 ResourceMark rm;
2291 //---< collect all data to locals as quickly as possible >---
2292 unsigned int total_size = nm->total_size();
2293 int hotness = nm->hotness_counter();
2294 bool get_name = (cbType == nMethod_inuse) || (cbType == nMethod_notused);
2295 //---< nMethod size in hex >---
2296 ast->print(PTR32_FORMAT, total_size);
2297 ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
2298 //---< compiler information >---
2299 ast->fill_to(51);
2300 ast->print("%5s %3d", compTypeName[StatArray[ix].compiler], StatArray[ix].level);
2301 //---< method temperature >---
2302 ast->fill_to(62);
2303 ast->print("%5d", hotness);
2304 //---< name and signature >---
2305 ast->fill_to(62+6);
2306 ast->print("%s", blobTypeName[cbType]);
2307 ast->fill_to(82+6);
2308 if (cbType == nMethod_dead) {
2474 ast->print("|");
2475 }
2476 ast->cr();
2477
2478 // can't use BUFFEREDSTREAM_FLUSH_IF("", 512) here.
2479 // can't use this expression. bufferedStream::capacity() does not exist.
2480 // if ((ast->capacity() - ast->size()) < 512) {
2481 // Assume instead that default bufferedStream capacity (4K) was used.
2482 if (ast->size() > 3*K) {
2483 ttyLocker ttyl;
2484 out->print("%s", ast->as_string());
2485 ast->reset();
2486 }
2487
2488 ast->print(INTPTR_FORMAT, p2i(low_bound + ix*granule_size));
2489 ast->fill_to(19);
2490 ast->print("(+" PTR32_FORMAT "): |", (unsigned int)(ix*granule_size));
2491 }
2492 }
2493
2494 CodeHeapState::blobType CodeHeapState::get_cbType(CodeBlob* cb) {
2495 if ((cb != NULL) && os::is_readable_pointer(cb)) {
2496 if (cb->is_runtime_stub()) return runtimeStub;
2497 if (cb->is_deoptimization_stub()) return deoptimizationStub;
2498 if (cb->is_uncommon_trap_stub()) return uncommonTrapStub;
2499 if (cb->is_exception_stub()) return exceptionStub;
2500 if (cb->is_safepoint_stub()) return safepointStub;
2501 if (cb->is_adapter_blob()) return adapterBlob;
2502 if (cb->is_method_handles_adapter_blob()) return mh_adapterBlob;
2503 if (cb->is_buffer_blob()) return bufferBlob;
2504
2505 //---< access these fields only if we own the CodeCache_lock >---
2506 // Should be ensured by caller. aggregate() amd print_names() do that.
2507 if (CodeCache_lock->owned_by_self()) {
2508 nmethod* nm = cb->as_nmethod_or_null();
2509 if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb.
2510 if (nm->is_not_installed()) return nMethod_inconstruction;
2511 if (nm->is_zombie()) return nMethod_dead;
2512 if (nm->is_unloaded()) return nMethod_unloaded;
2513 if (nm->is_in_use()) return nMethod_inuse;
2514 if (nm->is_alive() && !(nm->is_not_entrant())) return nMethod_notused;
2515 if (nm->is_alive()) return nMethod_alive;
2516 return nMethod_dead;
2517 }
2518 }
2519 }
2520 return noType;
2521 }
2522
2523 bool CodeHeapState::blob_access_is_safe(CodeBlob* this_blob, CodeBlob* prev_blob) {
2524 return (this_blob != NULL) && // a blob must have been found, obviously
2525 ((this_blob == prev_blob) || (prev_blob == NULL)) && // when re-checking, the same blob must have been found
2526 (this_blob->header_size() >= 0) &&
2527 (this_blob->relocation_size() >= 0) &&
2528 ((address)this_blob + this_blob->header_size() == (address)(this_blob->relocation_begin())) &&
2529 ((address)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (address)(this_blob->content_begin())) &&
2530 os::is_readable_pointer((address)(this_blob->relocation_begin())) &&
2531 os::is_readable_pointer(this_blob->content_begin());
2532 }
|
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "code/codeHeapState.hpp"
28 #include "compiler/compileBroker.hpp"
29 #include "runtime/safepoint.hpp"
30 #include "runtime/sweeper.hpp"
31
32 // -------------------------
33 // | General Description |
34 // -------------------------
35 // The CodeHeap state analytics are divided in two parts.
36 // The first part examines the entire CodeHeap and aggregates all
37 // information that is believed useful/important.
38 //
39 // Aggregation condenses the information of a piece of the CodeHeap
40 // (4096 bytes by default) into an analysis granule. These granules
41 // contain enough detail to gain initial insight while keeping the
// internal structure sizes in check.
43 //
44 // The second part, which consists of several, independent steps,
45 // prints the previously collected information with emphasis on
46 // various aspects.
47 //
48 // The CodeHeap is a living thing. Therefore, protection against concurrent
49 // modification (by acquiring the CodeCache_lock) is necessary. It has
196 #define BUFFEREDSTREAM_FLUSH(_termString) \
197 if (((_termString) != NULL) && (strlen(_termString) > 0)){\
198 _outbuf->print("%s", _termString); \
199 }
200
201 #define BUFFEREDSTREAM_FLUSH_IF(_termString, _remSize) \
202 BUFFEREDSTREAM_FLUSH(_termString)
203
204 #define BUFFEREDSTREAM_FLUSH_AUTO(_termString) \
205 BUFFEREDSTREAM_FLUSH(_termString)
206
207 #define BUFFEREDSTREAM_FLUSH_LOCKED(_termString) \
208 BUFFEREDSTREAM_FLUSH(_termString)
209
210 #define BUFFEREDSTREAM_FLUSH_STAT()
211 #endif
212 #define HEX32_FORMAT "0x%x" // just a helper format string used below multiple times
213
// One-character tag per blob category; indexed by the blobType enum
// (see e.g. blobTypeChar[TopSizeArray[i].type] in the print functions).
// NOTE: order and length must stay in sync with blobTypeName below.
const char blobTypeChar[] = {' ', 'C', 'N', 'I', 'X', 'Z', 'U', 'R', '?', 'D', 'T', 'E', 'S', 'A', 'M', 'B', 'L' };
// Human-readable description per blob category; indexed by the blobType enum.
const char* blobTypeName[] = {"noType"
                             , "nMethod (under construction), cannot be observed"
                             , "nMethod (active)"
                             , "nMethod (inactive)"
                             , "nMethod (deopt)"
                             , "nMethod (zombie)"
                             , "nMethod (unloaded)"
                             , "runtime stub"
                             , "ricochet stub"
                             , "deopt stub"
                             , "uncommon trap stub"
                             , "exception stub"
                             , "safepoint stub"
                             , "adapter blob"
                             , "MH adapter blob"
                             , "buffer blob"
                             , "lastType"
                             };
// Compiler name per compType value; indexed by the compiler id
// (see compTypeName[TopSizeArray[i].compiler] in the print functions).
const char* compTypeName[] = { "none", "c1", "c2", "jvmci" };
234
// Be prepared for ten different CodeHeap segments. Should be enough for a few years.
const unsigned int nSizeDistElements = 31; // logarithmic range growth, max size: 2**32
const unsigned int maxTopSizeBlocks = 100; // capacity of TopSizeArray (largest used blocks).
const unsigned int tsbStopper = 2 * maxTopSizeBlocks; // out-of-range index; terminates the TopSizeArray chain.
const unsigned int maxHeaps = 10;
static unsigned int nHeaps = 0; // number of heaps registered in CodeHeapStatArray.
static struct CodeHeapStat CodeHeapStatArray[maxHeaps]; // per-heap snapshot of the statistic state below.

// The following statics hold the analysis state for the heap currently being
// processed. They are loaded from, and stored back to, CodeHeapStatArray via
// get_HeapStatGlobals() / set_HeapStatGlobals().
static StatElement* StatArray = NULL; // one element per analysis granule.
static int log2_seg_size = 0; // log2(seg_size); 0 if seg_size == 0.
static size_t seg_size = 0; // CodeHeap segment size; smallest allocation granularity.
static size_t alloc_granules = 0; // number of elements allocated in StatArray.
static size_t granule_size = 0; // number of bytes covered by one StatArray element.
static bool segment_granules = false;
static unsigned int nBlocks_t1 = 0; // counting "in_use" nmethods only.
static unsigned int nBlocks_t2 = 0; // counting "in_use" nmethods only.
static unsigned int nBlocks_alive = 0; // counting "not_used" and "not_entrant" nmethods only.
static unsigned int nBlocks_dead = 0; // counting "zombie" and "unloaded" methods only.
static unsigned int nBlocks_unloaded = 0; // counting "unloaded" nmethods only. This is a transient state.
static unsigned int nBlocks_stub = 0;

static struct FreeBlk* FreeArray = NULL; // free blocks, with gap information to the respective successor.
static unsigned int alloc_freeBlocks = 0; // number of elements allocated in FreeArray.

static struct TopSizeBlk* TopSizeArray = NULL; // largest used blocks, chained via the index field.
static unsigned int alloc_topSizeBlocks = 0; // number of elements allocated in TopSizeArray.
static unsigned int used_topSizeBlocks = 0; // number of TopSizeArray elements currently in use.

static struct SizeDistributionElement* SizeDistributionArray = NULL;

// nMethod temperature (hotness) indicators.
static int avgTemp = 0;
static int maxTemp = 0;
static int minTemp = 0;

static unsigned int latest_compilation_id = 0; // compilation id as of the most recent aggregation step.
static volatile bool initialization_complete = false; // guards print functions against use before initialization.
273
304 } else {
305 nHeaps = 1;
306 CodeHeapStatArray[0].heapName = heapName;
307 return 0; // This is the default index if CodeCache is not segmented.
308 }
309 }
310
311 void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName) {
312 unsigned int ix = findHeapIndex(out, heapName);
313 if (ix < maxHeaps) {
314 StatArray = CodeHeapStatArray[ix].StatArray;
315 seg_size = CodeHeapStatArray[ix].segment_size;
316 log2_seg_size = seg_size == 0 ? 0 : exact_log2(seg_size);
317 alloc_granules = CodeHeapStatArray[ix].alloc_granules;
318 granule_size = CodeHeapStatArray[ix].granule_size;
319 segment_granules = CodeHeapStatArray[ix].segment_granules;
320 nBlocks_t1 = CodeHeapStatArray[ix].nBlocks_t1;
321 nBlocks_t2 = CodeHeapStatArray[ix].nBlocks_t2;
322 nBlocks_alive = CodeHeapStatArray[ix].nBlocks_alive;
323 nBlocks_dead = CodeHeapStatArray[ix].nBlocks_dead;
324 nBlocks_unloaded = CodeHeapStatArray[ix].nBlocks_unloaded;
325 nBlocks_stub = CodeHeapStatArray[ix].nBlocks_stub;
326 FreeArray = CodeHeapStatArray[ix].FreeArray;
327 alloc_freeBlocks = CodeHeapStatArray[ix].alloc_freeBlocks;
328 TopSizeArray = CodeHeapStatArray[ix].TopSizeArray;
329 alloc_topSizeBlocks = CodeHeapStatArray[ix].alloc_topSizeBlocks;
330 used_topSizeBlocks = CodeHeapStatArray[ix].used_topSizeBlocks;
331 SizeDistributionArray = CodeHeapStatArray[ix].SizeDistributionArray;
332 avgTemp = CodeHeapStatArray[ix].avgTemp;
333 maxTemp = CodeHeapStatArray[ix].maxTemp;
334 minTemp = CodeHeapStatArray[ix].minTemp;
335 } else {
336 StatArray = NULL;
337 seg_size = 0;
338 log2_seg_size = 0;
339 alloc_granules = 0;
340 granule_size = 0;
341 segment_granules = false;
342 nBlocks_t1 = 0;
343 nBlocks_t2 = 0;
344 nBlocks_alive = 0;
345 nBlocks_dead = 0;
346 nBlocks_unloaded = 0;
347 nBlocks_stub = 0;
348 FreeArray = NULL;
349 alloc_freeBlocks = 0;
350 TopSizeArray = NULL;
351 alloc_topSizeBlocks = 0;
352 used_topSizeBlocks = 0;
353 SizeDistributionArray = NULL;
354 avgTemp = 0;
355 maxTemp = 0;
356 minTemp = 0;
357 }
358 }
359
360 void CodeHeapState::set_HeapStatGlobals(outputStream* out, const char* heapName) {
361 unsigned int ix = findHeapIndex(out, heapName);
362 if (ix < maxHeaps) {
363 CodeHeapStatArray[ix].StatArray = StatArray;
364 CodeHeapStatArray[ix].segment_size = seg_size;
365 CodeHeapStatArray[ix].alloc_granules = alloc_granules;
366 CodeHeapStatArray[ix].granule_size = granule_size;
367 CodeHeapStatArray[ix].segment_granules = segment_granules;
368 CodeHeapStatArray[ix].nBlocks_t1 = nBlocks_t1;
369 CodeHeapStatArray[ix].nBlocks_t2 = nBlocks_t2;
370 CodeHeapStatArray[ix].nBlocks_alive = nBlocks_alive;
371 CodeHeapStatArray[ix].nBlocks_dead = nBlocks_dead;
372 CodeHeapStatArray[ix].nBlocks_unloaded = nBlocks_unloaded;
373 CodeHeapStatArray[ix].nBlocks_stub = nBlocks_stub;
374 CodeHeapStatArray[ix].FreeArray = FreeArray;
375 CodeHeapStatArray[ix].alloc_freeBlocks = alloc_freeBlocks;
376 CodeHeapStatArray[ix].TopSizeArray = TopSizeArray;
377 CodeHeapStatArray[ix].alloc_topSizeBlocks = alloc_topSizeBlocks;
378 CodeHeapStatArray[ix].used_topSizeBlocks = used_topSizeBlocks;
379 CodeHeapStatArray[ix].SizeDistributionArray = SizeDistributionArray;
380 CodeHeapStatArray[ix].avgTemp = avgTemp;
381 CodeHeapStatArray[ix].maxTemp = maxTemp;
382 CodeHeapStatArray[ix].minTemp = minTemp;
383 }
384 }
385
386 //---< get a new statistics array >---
387 void CodeHeapState::prepare_StatArray(outputStream* out, size_t nElem, size_t granularity, const char* heapName) {
388 if (StatArray == NULL) {
389 StatArray = new StatElement[nElem];
390 //---< reset some counts >---
391 alloc_granules = nElem;
478
479 void CodeHeapState::discard_StatArray(outputStream* out) {
480 if (StatArray != NULL) {
481 delete StatArray;
482 StatArray = NULL;
483 alloc_granules = 0;
484 granule_size = 0;
485 }
486 }
487
488 void CodeHeapState::discard_FreeArray(outputStream* out) {
489 if (FreeArray != NULL) {
490 delete[] FreeArray;
491 FreeArray = NULL;
492 alloc_freeBlocks = 0;
493 }
494 }
495
496 void CodeHeapState::discard_TopSizeArray(outputStream* out) {
497 if (TopSizeArray != NULL) {
498 for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
499 if (TopSizeArray[i].blob_name != NULL) {
500 os::free((void*)TopSizeArray[i].blob_name);
501 }
502 }
503 delete[] TopSizeArray;
504 TopSizeArray = NULL;
505 alloc_topSizeBlocks = 0;
506 used_topSizeBlocks = 0;
507 }
508 }
509
510 void CodeHeapState::discard_SizeDistArray(outputStream* out) {
511 if (SizeDistributionArray != NULL) {
512 delete[] SizeDistributionArray;
513 SizeDistributionArray = NULL;
514 }
515 }
516
517 // Discard all allocated internal data structures.
518 // This should be done after an analysis session is completed.
519 void CodeHeapState::discard(outputStream* out, CodeHeap* heap) {
520 if (!initialization_complete) {
521 return;
522 }
576 BUFFEREDSTREAM_FLUSH("")
577 }
578 get_HeapStatGlobals(out, heapName);
579
580
581 // Since we are (and must be) analyzing the CodeHeap contents under the CodeCache_lock,
582 // all heap information is "constant" and can be safely extracted/calculated before we
583 // enter the while() loop. Actually, the loop will only be iterated once.
584 char* low_bound = heap->low_boundary();
585 size_t size = heap->capacity();
586 size_t res_size = heap->max_capacity();
587 seg_size = heap->segment_size();
588 log2_seg_size = seg_size == 0 ? 0 : exact_log2(seg_size); // This is a global static value.
589
590 if (seg_size == 0) {
591 printBox(ast, '-', "Heap not fully initialized yet, segment size is zero for segment ", heapName);
592 BUFFEREDSTREAM_FLUSH("")
593 return;
594 }
595
596 if (!holding_required_locks()) {
597 printBox(ast, '-', "Must be at safepoint or hold Compile_lock and CodeCache_lock when calling aggregate function for ", heapName);
598 BUFFEREDSTREAM_FLUSH("")
599 return;
600 }
601
602 // Calculate granularity of analysis (and output).
603 // The CodeHeap is managed (allocated) in segments (units) of CodeCacheSegmentSize.
604 // The CodeHeap can become fairly large, in particular in productive real-life systems.
605 //
606 // It is often neither feasible nor desirable to aggregate the data with the highest possible
607 // level of detail, i.e. inspecting and printing each segment on its own.
608 //
609 // The granularity parameter allows to specify the level of detail available in the analysis.
610 // It must be a positive multiple of the segment size and should be selected such that enough
611 // detail is provided while, at the same time, the printed output does not explode.
612 //
613 // By manipulating the granularity value, we enforce that at least min_granules units
614 // of analysis are available. We also enforce an upper limit of max_granules units to
615 // keep the amount of allocated storage in check.
616 //
617 // Finally, we adjust the granularity such that each granule covers at most 64k-1 segments.
644 " Subsequent print functions create their output based on this snapshot.\n"
645 " The CodeHeap is a living thing, and every effort has been made for the\n"
646 " collected data to be consistent. Only the method names and signatures\n"
647 " are retrieved at print time. That may lead to rare cases where the\n"
648 " name of a method is no longer available, e.g. because it was unloaded.\n");
649 ast->print_cr(" CodeHeap committed size " SIZE_FORMAT "K (" SIZE_FORMAT "M), reserved size " SIZE_FORMAT "K (" SIZE_FORMAT "M), %d%% occupied.",
650 size/(size_t)K, size/(size_t)M, res_size/(size_t)K, res_size/(size_t)M, (unsigned int)(100.0*size/res_size));
651 ast->print_cr(" CodeHeap allocation segment size is " SIZE_FORMAT " bytes. This is the smallest possible granularity.", seg_size);
652 ast->print_cr(" CodeHeap (committed part) is mapped to " SIZE_FORMAT " granules of size " SIZE_FORMAT " bytes.", granules, granularity);
653 ast->print_cr(" Each granule takes " SIZE_FORMAT " bytes of C heap, that is " SIZE_FORMAT "K in total for statistics data.", sizeof(StatElement), (sizeof(StatElement)*granules)/(size_t)K);
654 ast->print_cr(" The number of granules is limited to %dk, requiring a granules size of at least %d bytes for a 1GB heap.", (unsigned int)(max_granules/K), (unsigned int)(G/max_granules));
655 BUFFEREDSTREAM_FLUSH("\n")
656
657
658 while (!done) {
659 //---< reset counters with every aggregation >---
660 nBlocks_t1 = 0;
661 nBlocks_t2 = 0;
662 nBlocks_alive = 0;
663 nBlocks_dead = 0;
664 nBlocks_unloaded = 0;
665 nBlocks_stub = 0;
666
667 nBlocks_free = 0;
668 nBlocks_used = 0;
669 nBlocks_zomb = 0;
670 nBlocks_disconn = 0;
671 nBlocks_notentr = 0;
672
673 //---< discard old arrays if size does not match >---
674 if (granules != alloc_granules) {
675 discard_StatArray(out);
676 discard_TopSizeArray(out);
677 }
678
679 //---< allocate arrays if they don't yet exist, initialize >---
680 prepare_StatArray(out, granules, granularity, heapName);
681 if (StatArray == NULL) {
682 set_HeapStatGlobals(out, heapName);
683 return;
684 }
685 prepare_TopSizeArray(out, maxTopSizeBlocks, heapName);
686 prepare_SizeDistArray(out, nSizeDistElements, heapName);
687
688 latest_compilation_id = CompileBroker::get_compilation_id();
689 unsigned int highest_compilation_id = 0;
690 size_t usedSpace = 0;
691 size_t t1Space = 0;
692 size_t t2Space = 0;
693 size_t aliveSpace = 0;
694 size_t disconnSpace = 0;
695 size_t notentrSpace = 0;
696 size_t deadSpace = 0;
697 size_t unloadedSpace = 0;
698 size_t stubSpace = 0;
699 size_t freeSpace = 0;
700 size_t maxFreeSize = 0;
701 HeapBlock* maxFreeBlock = NULL;
702 bool insane = false;
703
704 int64_t hotnessAccumulator = 0;
705 unsigned int n_methods = 0;
706 avgTemp = 0;
707 minTemp = (int)(res_size > M ? (res_size/M)*2 : 1);
708 maxTemp = -minTemp;
709
710 for (HeapBlock *h = heap->first_block(); h != NULL && !insane; h = heap->next_block(h)) {
711 unsigned int hb_len = (unsigned int)h->length(); // despite being size_t, length can never overflow an unsigned int.
712 size_t hb_bytelen = ((size_t)hb_len)<<log2_seg_size;
713 unsigned int ix_beg = (unsigned int)(((char*)h-low_bound)/granule_size);
714 unsigned int ix_end = (unsigned int)(((char*)h-low_bound+(hb_bytelen-1))/granule_size);
715 unsigned int compile_id = 0;
716 CompLevel comp_lvl = CompLevel_none;
738 if (ix_beg > ix_end) {
739 insane = true; ast->print_cr("Sanity check: end index (%d) lower than begin index (%d)", ix_end, ix_beg);
740 }
741 if (insane) {
742 BUFFEREDSTREAM_FLUSH("")
743 continue;
744 }
745
746 if (h->free()) {
747 nBlocks_free++;
748 freeSpace += hb_bytelen;
749 if (hb_bytelen > maxFreeSize) {
750 maxFreeSize = hb_bytelen;
751 maxFreeBlock = h;
752 }
753 } else {
754 update_SizeDistArray(out, hb_len);
755 nBlocks_used++;
756 usedSpace += hb_bytelen;
757 CodeBlob* cb = (CodeBlob*)heap->find_start(h);
758 cbType = get_cbType(cb); // Will check for cb == NULL and other safety things.
759 if (cbType != noType) {
760 const char* blob_name = os::strdup(cb->name());
761 unsigned int nm_size = 0;
762 int temperature = 0;
763 nmethod* nm = cb->as_nmethod_or_null();
764 if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb.
765 ResourceMark rm;
766 Method* method = nm->method();
767 if (nm->is_in_use()) {
768 blob_name = os::strdup(method->name_and_sig_as_C_string());
769 }
770 if (nm->is_not_entrant()) {
771 blob_name = os::strdup(method->name_and_sig_as_C_string());
772 }
773
774 nm_size = nm->total_size();
775 compile_id = nm->compile_id();
776 comp_lvl = (CompLevel)(nm->comp_level());
777 if (nm->is_compiled_by_c1()) {
778 cType = c1;
779 }
780 if (nm->is_compiled_by_c2()) {
781 cType = c2;
782 }
783 if (nm->is_compiled_by_jvmci()) {
784 cType = jvmci;
785 }
786 switch (cbType) {
787 case nMethod_inuse: { // only for executable methods!!!
788 // space for these cbs is accounted for later.
789 temperature = nm->hotness_counter();
790 hotnessAccumulator += temperature;
791 n_methods++;
792 maxTemp = (temperature > maxTemp) ? temperature : maxTemp;
793 minTemp = (temperature < minTemp) ? temperature : minTemp;
794 break;
795 }
796 case nMethod_notused:
797 nBlocks_alive++;
798 nBlocks_disconn++;
799 aliveSpace += hb_bytelen;
800 disconnSpace += hb_bytelen;
801 break;
802 case nMethod_notentrant: // equivalent to nMethod_alive
803 nBlocks_alive++;
804 nBlocks_notentr++;
805 aliveSpace += hb_bytelen;
806 notentrSpace += hb_bytelen;
807 break;
808 case nMethod_unloaded:
809 nBlocks_unloaded++;
810 unloadedSpace += hb_bytelen;
811 break;
812 case nMethod_dead:
813 nBlocks_dead++;
814 deadSpace += hb_bytelen;
815 break;
816 default:
817 break;
818 }
819 }
820
821 //------------------------------------------
822 //---< register block in TopSizeArray >---
823 //------------------------------------------
824 if (alloc_topSizeBlocks > 0) {
825 if (used_topSizeBlocks == 0) {
826 TopSizeArray[0].start = h;
827 TopSizeArray[0].blob_name = blob_name;
828 TopSizeArray[0].len = hb_len;
829 TopSizeArray[0].index = tsbStopper;
830 TopSizeArray[0].nm_size = nm_size;
831 TopSizeArray[0].temperature = temperature;
832 TopSizeArray[0].compiler = cType;
833 TopSizeArray[0].level = comp_lvl;
834 TopSizeArray[0].type = cbType;
835 currMax = hb_len;
836 currMin = hb_len;
837 currMin_ix = 0;
838 used_topSizeBlocks++;
839 blob_name = NULL; // indicate blob_name was consumed
840 // This check roughly cuts 5000 iterations (JVM98, mixed, dbg, termination stats):
841 } else if ((used_topSizeBlocks < alloc_topSizeBlocks) && (hb_len < currMin)) {
842 //---< all blocks in list are larger, but there is room left in array >---
843 TopSizeArray[currMin_ix].index = used_topSizeBlocks;
844 TopSizeArray[used_topSizeBlocks].start = h;
845 TopSizeArray[used_topSizeBlocks].blob_name = blob_name;
846 TopSizeArray[used_topSizeBlocks].len = hb_len;
847 TopSizeArray[used_topSizeBlocks].index = tsbStopper;
848 TopSizeArray[used_topSizeBlocks].nm_size = nm_size;
849 TopSizeArray[used_topSizeBlocks].temperature = temperature;
850 TopSizeArray[used_topSizeBlocks].compiler = cType;
851 TopSizeArray[used_topSizeBlocks].level = comp_lvl;
852 TopSizeArray[used_topSizeBlocks].type = cbType;
853 currMin = hb_len;
854 currMin_ix = used_topSizeBlocks;
855 used_topSizeBlocks++;
856 blob_name = NULL; // indicate blob_name was consumed
857 } else {
858 // This check cuts total_iterations by a factor of 6 (JVM98, mixed, dbg, termination stats):
859 // We don't need to search the list if we know beforehand that the current block size is
860 // smaller than the currently recorded minimum and there is no free entry left in the list.
861 if (!((used_topSizeBlocks == alloc_topSizeBlocks) && (hb_len <= currMin))) {
862 if (currMax < hb_len) {
863 currMax = hb_len;
864 }
865 unsigned int i;
866 unsigned int prev_i = tsbStopper;
867 unsigned int limit_i = 0;
868 for (i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
869 if (limit_i++ >= alloc_topSizeBlocks) {
870 insane = true; break; // emergency exit
871 }
872 if (i >= used_topSizeBlocks) {
873 insane = true; break; // emergency exit
874 }
875 total_iterations++;
876 if (TopSizeArray[i].len < hb_len) {
877 //---< We want to insert here, element <i> is smaller than the current one >---
878 if (used_topSizeBlocks < alloc_topSizeBlocks) { // still room for a new entry to insert
879 // old entry gets moved to the next free element of the array.
880 // That's necessary to keep the entry for the largest block at index 0.
881 // This move might cause the current minimum to be moved to another place
882 if (i == currMin_ix) {
883 assert(TopSizeArray[i].len == currMin, "sort error");
884 currMin_ix = used_topSizeBlocks;
885 }
886 memcpy((void*)&TopSizeArray[used_topSizeBlocks], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
887 TopSizeArray[i].start = h;
888 TopSizeArray[i].blob_name = blob_name;
889 TopSizeArray[i].len = hb_len;
890 TopSizeArray[i].index = used_topSizeBlocks;
891 TopSizeArray[i].nm_size = nm_size;
892 TopSizeArray[i].temperature = temperature;
893 TopSizeArray[i].compiler = cType;
894 TopSizeArray[i].level = comp_lvl;
895 TopSizeArray[i].type = cbType;
896 used_topSizeBlocks++;
897 blob_name = NULL; // indicate blob_name was consumed
898 } else { // no room for new entries, current block replaces entry for smallest block
899 //---< Find last entry (entry for smallest remembered block) >---
900 // We either want to insert right before the smallest entry, which is when <i>
901 // indexes the smallest entry. We then just overwrite the smallest entry.
902 // What's more likely:
903 // We want to insert somewhere in the list. The smallest entry (@<j>) then falls off the cliff.
904 // The element at the insert point <i> takes it's slot. The second-smallest entry now becomes smallest.
905 // Data of the current block is filled in at index <i>.
906 unsigned int j = i;
907 unsigned int prev_j = tsbStopper;
908 unsigned int limit_j = 0;
909 while (TopSizeArray[j].index != tsbStopper) {
910 if (limit_j++ >= alloc_topSizeBlocks) {
911 insane = true; break; // emergency exit
912 }
913 if (j >= used_topSizeBlocks) {
914 insane = true; break; // emergency exit
915 }
916 total_iterations++;
917 prev_j = j;
918 j = TopSizeArray[j].index;
919 }
920 if (!insane) {
921 if (TopSizeArray[j].blob_name != NULL) {
922 os::free((void*)TopSizeArray[j].blob_name);
923 }
924 if (prev_j == tsbStopper) {
925 //---< Above while loop did not iterate, we already are the min entry >---
926 //---< We have to just replace the smallest entry >---
927 currMin = hb_len;
928 currMin_ix = j;
929 TopSizeArray[j].start = h;
930 TopSizeArray[j].blob_name = blob_name;
931 TopSizeArray[j].len = hb_len;
932 TopSizeArray[j].index = tsbStopper; // already set!!
933 TopSizeArray[i].nm_size = nm_size;
934 TopSizeArray[i].temperature = temperature;
935 TopSizeArray[j].compiler = cType;
936 TopSizeArray[j].level = comp_lvl;
937 TopSizeArray[j].type = cbType;
938 } else {
939 //---< second-smallest entry is now smallest >---
940 TopSizeArray[prev_j].index = tsbStopper;
941 currMin = TopSizeArray[prev_j].len;
942 currMin_ix = prev_j;
943 //---< previously smallest entry gets overwritten >---
944 memcpy((void*)&TopSizeArray[j], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
945 TopSizeArray[i].start = h;
946 TopSizeArray[i].blob_name = blob_name;
947 TopSizeArray[i].len = hb_len;
948 TopSizeArray[i].index = j;
949 TopSizeArray[i].nm_size = nm_size;
950 TopSizeArray[i].temperature = temperature;
951 TopSizeArray[i].compiler = cType;
952 TopSizeArray[i].level = comp_lvl;
953 TopSizeArray[i].type = cbType;
954 }
955 blob_name = NULL; // indicate blob_name was consumed
956 } // insane
957 }
958 break;
959 }
960 prev_i = i;
961 }
962 if (insane) {
963 // Note: regular analysis could probably continue by resetting "insane" flag.
964 out->print_cr("Possible loop in TopSizeBlocks list detected. Analysis aborted.");
965 discard_TopSizeArray(out);
966 }
967 }
968 }
969 }
970 if (blob_name != NULL) {
971 os::free((void*)blob_name);
972 blob_name = NULL;
973 }
974 //----------------------------------------------
975 //---< END register block in TopSizeArray >---
976 //----------------------------------------------
977 } else {
978 nBlocks_zomb++;
979 }
980
981 if (ix_beg == ix_end) {
982 StatArray[ix_beg].type = cbType;
983 switch (cbType) {
984 case nMethod_inuse:
985 highest_compilation_id = (highest_compilation_id >= compile_id) ? highest_compilation_id : compile_id;
986 if (comp_lvl < CompLevel_full_optimization) {
987 nBlocks_t1++;
988 t1Space += hb_bytelen;
989 StatArray[ix_beg].t1_count++;
990 StatArray[ix_beg].t1_space += (unsigned short)hb_len;
991 StatArray[ix_beg].t1_age = StatArray[ix_beg].t1_age < compile_id ? compile_id : StatArray[ix_beg].t1_age;
992 } else {
993 nBlocks_t2++;
994 t2Space += hb_bytelen;
995 StatArray[ix_beg].t2_count++;
996 StatArray[ix_beg].t2_space += (unsigned short)hb_len;
997 StatArray[ix_beg].t2_age = StatArray[ix_beg].t2_age < compile_id ? compile_id : StatArray[ix_beg].t2_age;
998 }
999 StatArray[ix_beg].level = comp_lvl;
1000 StatArray[ix_beg].compiler = cType;
1001 break;
1002 case nMethod_alive:
1003 StatArray[ix_beg].tx_count++;
1004 StatArray[ix_beg].tx_space += (unsigned short)hb_len;
1005 StatArray[ix_beg].tx_age = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
1006 StatArray[ix_beg].level = comp_lvl;
1007 StatArray[ix_beg].compiler = cType;
1008 break;
1009 case nMethod_dead:
1010 case nMethod_unloaded:
1011 StatArray[ix_beg].dead_count++;
1012 StatArray[ix_beg].dead_space += (unsigned short)hb_len;
1013 break;
1014 default:
1015 // must be a stub, if it's not a dead or alive nMethod
1016 nBlocks_stub++;
1017 stubSpace += hb_bytelen;
1018 StatArray[ix_beg].stub_count++;
1019 StatArray[ix_beg].stub_space += (unsigned short)hb_len;
1020 break;
1021 }
1038
1039 StatArray[ix_end].t1_count++;
1040 StatArray[ix_end].t1_space += (unsigned short)end_space;
1041 StatArray[ix_end].t1_age = StatArray[ix_end].t1_age < compile_id ? compile_id : StatArray[ix_end].t1_age;
1042 } else {
1043 nBlocks_t2++;
1044 t2Space += hb_bytelen;
1045 StatArray[ix_beg].t2_count++;
1046 StatArray[ix_beg].t2_space += (unsigned short)beg_space;
1047 StatArray[ix_beg].t2_age = StatArray[ix_beg].t2_age < compile_id ? compile_id : StatArray[ix_beg].t2_age;
1048
1049 StatArray[ix_end].t2_count++;
1050 StatArray[ix_end].t2_space += (unsigned short)end_space;
1051 StatArray[ix_end].t2_age = StatArray[ix_end].t2_age < compile_id ? compile_id : StatArray[ix_end].t2_age;
1052 }
1053 StatArray[ix_beg].level = comp_lvl;
1054 StatArray[ix_beg].compiler = cType;
1055 StatArray[ix_end].level = comp_lvl;
1056 StatArray[ix_end].compiler = cType;
1057 break;
1058 case nMethod_alive:
1059 StatArray[ix_beg].tx_count++;
1060 StatArray[ix_beg].tx_space += (unsigned short)beg_space;
1061 StatArray[ix_beg].tx_age = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
1062
1063 StatArray[ix_end].tx_count++;
1064 StatArray[ix_end].tx_space += (unsigned short)end_space;
1065 StatArray[ix_end].tx_age = StatArray[ix_end].tx_age < compile_id ? compile_id : StatArray[ix_end].tx_age;
1066
1067 StatArray[ix_beg].level = comp_lvl;
1068 StatArray[ix_beg].compiler = cType;
1069 StatArray[ix_end].level = comp_lvl;
1070 StatArray[ix_end].compiler = cType;
1071 break;
1072 case nMethod_dead:
1073 case nMethod_unloaded:
1074 StatArray[ix_beg].dead_count++;
1075 StatArray[ix_beg].dead_space += (unsigned short)beg_space;
1076 StatArray[ix_end].dead_count++;
1077 StatArray[ix_end].dead_space += (unsigned short)end_space;
1085 StatArray[ix_end].stub_count++;
1086 StatArray[ix_end].stub_space += (unsigned short)end_space;
1087 break;
1088 }
1089 for (unsigned int ix = ix_beg+1; ix < ix_end; ix++) {
1090 StatArray[ix].type = cbType;
1091 switch (cbType) {
1092 case nMethod_inuse:
1093 if (comp_lvl < CompLevel_full_optimization) {
1094 StatArray[ix].t1_count++;
1095 StatArray[ix].t1_space += (unsigned short)(granule_size>>log2_seg_size);
1096 StatArray[ix].t1_age = StatArray[ix].t1_age < compile_id ? compile_id : StatArray[ix].t1_age;
1097 } else {
1098 StatArray[ix].t2_count++;
1099 StatArray[ix].t2_space += (unsigned short)(granule_size>>log2_seg_size);
1100 StatArray[ix].t2_age = StatArray[ix].t2_age < compile_id ? compile_id : StatArray[ix].t2_age;
1101 }
1102 StatArray[ix].level = comp_lvl;
1103 StatArray[ix].compiler = cType;
1104 break;
1105 case nMethod_alive:
1106 StatArray[ix].tx_count++;
1107 StatArray[ix].tx_space += (unsigned short)(granule_size>>log2_seg_size);
1108 StatArray[ix].tx_age = StatArray[ix].tx_age < compile_id ? compile_id : StatArray[ix].tx_age;
1109 StatArray[ix].level = comp_lvl;
1110 StatArray[ix].compiler = cType;
1111 break;
1112 case nMethod_dead:
1113 case nMethod_unloaded:
1114 StatArray[ix].dead_count++;
1115 StatArray[ix].dead_space += (unsigned short)(granule_size>>log2_seg_size);
1116 break;
1117 default:
1118 // must be a stub, if it's not a dead or alive nMethod
1119 StatArray[ix].stub_count++;
1120 StatArray[ix].stub_space += (unsigned short)(granule_size>>log2_seg_size);
1121 break;
1122 }
1123 }
1124 }
1125 }
1126 }
1127 done = true;
1128
1129 if (!insane) {
1130 // There is a risk for this block (because it contains many print statements) to get
1131 // interspersed with print data from other threads. We take this risk intentionally.
1132 // Getting stalled waiting for tty_lock while holding the CodeCache_lock is not desirable.
1133 printBox(ast, '-', "Global CodeHeap statistics for segment ", heapName);
1134 ast->print_cr("freeSpace = " SIZE_FORMAT_W(8) "k, nBlocks_free = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", freeSpace/(size_t)K, nBlocks_free, (100.0*freeSpace)/size, (100.0*freeSpace)/res_size);
1135 ast->print_cr("usedSpace = " SIZE_FORMAT_W(8) "k, nBlocks_used = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", usedSpace/(size_t)K, nBlocks_used, (100.0*usedSpace)/size, (100.0*usedSpace)/res_size);
1136 ast->print_cr(" Tier1 Space = " SIZE_FORMAT_W(8) "k, nBlocks_t1 = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", t1Space/(size_t)K, nBlocks_t1, (100.0*t1Space)/size, (100.0*t1Space)/res_size);
1137 ast->print_cr(" Tier2 Space = " SIZE_FORMAT_W(8) "k, nBlocks_t2 = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", t2Space/(size_t)K, nBlocks_t2, (100.0*t2Space)/size, (100.0*t2Space)/res_size);
1138 ast->print_cr(" Alive Space = " SIZE_FORMAT_W(8) "k, nBlocks_alive = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", aliveSpace/(size_t)K, nBlocks_alive, (100.0*aliveSpace)/size, (100.0*aliveSpace)/res_size);
1139 ast->print_cr(" disconnected = " SIZE_FORMAT_W(8) "k, nBlocks_disconn = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", disconnSpace/(size_t)K, nBlocks_disconn, (100.0*disconnSpace)/size, (100.0*disconnSpace)/res_size);
1140 ast->print_cr(" not entrant = " SIZE_FORMAT_W(8) "k, nBlocks_notentr = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", notentrSpace/(size_t)K, nBlocks_notentr, (100.0*notentrSpace)/size, (100.0*notentrSpace)/res_size);
1141 ast->print_cr(" unloadedSpace = " SIZE_FORMAT_W(8) "k, nBlocks_unloaded = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", unloadedSpace/(size_t)K, nBlocks_unloaded, (100.0*unloadedSpace)/size, (100.0*unloadedSpace)/res_size);
1142 ast->print_cr(" deadSpace = " SIZE_FORMAT_W(8) "k, nBlocks_dead = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", deadSpace/(size_t)K, nBlocks_dead, (100.0*deadSpace)/size, (100.0*deadSpace)/res_size);
1143 ast->print_cr(" stubSpace = " SIZE_FORMAT_W(8) "k, nBlocks_stub = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", stubSpace/(size_t)K, nBlocks_stub, (100.0*stubSpace)/size, (100.0*stubSpace)/res_size);
1144 ast->print_cr("ZombieBlocks = %8d. These are HeapBlocks which could not be identified as CodeBlobs.", nBlocks_zomb);
1145 ast->cr();
1146 ast->print_cr("Segment start = " INTPTR_FORMAT ", used space = " SIZE_FORMAT_W(8)"k", p2i(low_bound), size/K);
1147 ast->print_cr("Segment end (used) = " INTPTR_FORMAT ", remaining space = " SIZE_FORMAT_W(8)"k", p2i(low_bound) + size, (res_size - size)/K);
1148 ast->print_cr("Segment end (reserved) = " INTPTR_FORMAT ", reserved space = " SIZE_FORMAT_W(8)"k", p2i(low_bound) + res_size, res_size/K);
1149 ast->cr();
1150 ast->print_cr("latest allocated compilation id = %d", latest_compilation_id);
1151 ast->print_cr("highest observed compilation id = %d", highest_compilation_id);
1152 ast->print_cr("Building TopSizeList iterations = %ld", total_iterations);
1153 ast->cr();
1154
1155 int reset_val = NMethodSweeper::hotness_counter_reset_val();
1156 double reverse_free_ratio = (res_size > size) ? (double)res_size/(double)(res_size-size) : (double)res_size;
1157 printBox(ast, '-', "Method hotness information at time of this analysis", NULL);
1158 ast->print_cr("Highest possible method temperature: %12d", reset_val);
1159 ast->print_cr("Threshold for method to be considered 'cold': %12.3f", -reset_val + reverse_free_ratio * NmethodSweepActivity);
1160 if (n_methods > 0) {
1298 ast->print_cr("Free block count mismatch could not be resolved.");
1299 ast->print_cr("Try to run \"aggregate\" function to update counters");
1300 }
1301 BUFFEREDSTREAM_FLUSH("")
1302
1303 //---< discard old array and update global values >---
1304 discard_FreeArray(out);
1305 set_HeapStatGlobals(out, heapName);
1306 return;
1307 }
1308
1309 //---< calculate and fill remaining fields >---
1310 if (FreeArray != NULL) {
1311 // This loop is intentionally printing directly to "out".
1312 // It should not print anything, anyway.
1313 for (unsigned int ix = 0; ix < alloc_freeBlocks-1; ix++) {
1314 size_t lenSum = 0;
1315 FreeArray[ix].gap = (unsigned int)((address)FreeArray[ix+1].start - ((address)FreeArray[ix].start + FreeArray[ix].len));
1316 for (HeapBlock *h = heap->next_block(FreeArray[ix].start); (h != NULL) && (h != FreeArray[ix+1].start); h = heap->next_block(h)) {
1317 CodeBlob *cb = (CodeBlob*)(heap->find_start(h));
1318 if ((cb != NULL) && !cb->is_nmethod()) { // checks equivalent to those in get_cbType()
1319 FreeArray[ix].stubs_in_gap = true;
1320 }
1321 FreeArray[ix].n_gapBlocks++;
1322 lenSum += h->length()<<log2_seg_size;
1323 if (((address)h < ((address)FreeArray[ix].start+FreeArray[ix].len)) || (h >= FreeArray[ix+1].start)) {
1324 out->print_cr("unsorted occupied CodeHeap block found @ %p, gap interval [%p, %p)", h, (address)FreeArray[ix].start+FreeArray[ix].len, FreeArray[ix+1].start);
1325 }
1326 }
1327 if (lenSum != FreeArray[ix].gap) {
1328 out->print_cr("Length mismatch for gap between FreeBlk[%d] and FreeBlk[%d]. Calculated: %d, accumulated: %d.", ix, ix+1, FreeArray[ix].gap, (unsigned int)lenSum);
1329 }
1330 }
1331 }
1332 set_HeapStatGlobals(out, heapName);
1333
1334 printBox(ast, '=', "C O D E H E A P A N A L Y S I S C O M P L E T E for segment ", heapName);
1335 BUFFEREDSTREAM_FLUSH("\n")
1336 }
1337
1338
1350 BUFFEREDSTREAM_DECL(ast, out)
1351
1352 {
1353 printBox(ast, '=', "U S E D S P A C E S T A T I S T I C S for ", heapName);
1354 ast->print_cr("Note: The Top%d list of the largest used blocks associates method names\n"
1355 " and other identifying information with the block size data.\n"
1356 "\n"
1357 " Method names are dynamically retrieved from the code cache at print time.\n"
1358 " Due to the living nature of the code cache and because the CodeCache_lock\n"
1359 " is not continuously held, the displayed name might be wrong or no name\n"
1360 " might be found at all. The likelihood for that to happen increases\n"
1361 " over time passed between analysis and print step.\n", used_topSizeBlocks);
1362 BUFFEREDSTREAM_FLUSH_LOCKED("\n")
1363 }
1364
1365 //----------------------------
1366 //-- Print Top Used Blocks --
1367 //----------------------------
1368 {
1369 char* low_bound = heap->low_boundary();
1370
1371 printBox(ast, '-', "Largest Used Blocks in ", heapName);
1372 print_blobType_legend(ast);
1373
1374 ast->fill_to(51);
1375 ast->print("%4s", "blob");
1376 ast->fill_to(56);
1377 ast->print("%9s", "compiler");
1378 ast->fill_to(66);
1379 ast->print_cr("%6s", "method");
1380 ast->print_cr("%18s %13s %17s %4s %9s %5s %s", "Addr(module) ", "offset", "size", "type", " type lvl", " temp", "Name");
1381 BUFFEREDSTREAM_FLUSH_LOCKED("")
1382
1383 //---< print Top Ten Used Blocks >---
1384 if (used_topSizeBlocks > 0) {
1385 unsigned int printed_topSizeBlocks = 0;
1386 for (unsigned int i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
1387 printed_topSizeBlocks++;
1388 if (TopSizeArray[i].blob_name == NULL) {
1389 TopSizeArray[i].blob_name = os::strdup("unnamed blob or blob name unavailable");
1390 }
1391 // heap->find_start() is safe. Only works on _segmap.
1392 // Returns NULL or void*. Returned CodeBlob may be uninitialized.
1393 HeapBlock* heapBlock = TopSizeArray[i].start;
1394 CodeBlob* this_blob = (CodeBlob*)(heap->find_start(heapBlock));
1395 if (this_blob != NULL) {
1396 //---< access these fields only if we own the CodeCache_lock >---
1397 //---< blob address >---
1398 ast->print(INTPTR_FORMAT, p2i(this_blob));
1399 ast->fill_to(19);
1400 //---< blob offset from CodeHeap begin >---
1401 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
1402 ast->fill_to(33);
1403 } else {
1404 //---< block address >---
1405 ast->print(INTPTR_FORMAT, p2i(TopSizeArray[i].start));
1406 ast->fill_to(19);
1407 //---< block offset from CodeHeap begin >---
1408 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)TopSizeArray[i].start-low_bound));
1409 ast->fill_to(33);
1410 }
1411
1412 //---< print size, name, and signature (for nMethods) >---
1413 bool is_nmethod = TopSizeArray[i].nm_size > 0;
1414 if (is_nmethod) {
1415 //---< nMethod size in hex >---
1416 ast->print(PTR32_FORMAT, TopSizeArray[i].nm_size);
1417 ast->print("(" SIZE_FORMAT_W(4) "K)", TopSizeArray[i].nm_size/K);
1418 ast->fill_to(51);
1419 ast->print(" %c", blobTypeChar[TopSizeArray[i].type]);
1420 //---< compiler information >---
1421 ast->fill_to(56);
1422 ast->print("%5s %3d", compTypeName[TopSizeArray[i].compiler], TopSizeArray[i].level);
1423 //---< method temperature >---
1424 ast->fill_to(67);
1425 ast->print("%5d", TopSizeArray[i].temperature);
1426 //---< name and signature >---
1427 ast->fill_to(67+6);
1428 if (TopSizeArray[i].type == nMethod_dead) {
1429 ast->print(" zombie method ");
1430 }
1431 ast->print("%s", TopSizeArray[i].blob_name);
1432 } else {
1433 //---< block size in hex >---
1434 ast->print(PTR32_FORMAT, (unsigned int)(TopSizeArray[i].len<<log2_seg_size));
1435 ast->print("(" SIZE_FORMAT_W(4) "K)", (TopSizeArray[i].len<<log2_seg_size)/K);
1436 //---< no compiler information >---
1437 ast->fill_to(56);
1438 //---< name and signature >---
1439 ast->fill_to(67+6);
1440 ast->print("%s", TopSizeArray[i].blob_name);
1441 }
1442 ast->cr();
1443 BUFFEREDSTREAM_FLUSH_AUTO("")
1444 }
1445 if (used_topSizeBlocks != printed_topSizeBlocks) {
1446 ast->print_cr("used blocks: %d, printed blocks: %d", used_topSizeBlocks, printed_topSizeBlocks);
1447 for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
1448 ast->print_cr(" TopSizeArray[%d].index = %d, len = %d", i, TopSizeArray[i].index, TopSizeArray[i].len);
1449 BUFFEREDSTREAM_FLUSH_AUTO("")
1450 }
1451 }
1452 BUFFEREDSTREAM_FLUSH("\n\n")
1453 }
1454 }
1455
1456 //-----------------------------
1457 //-- Print Usage Histogram --
1458 //-----------------------------
1459
1460 if (SizeDistributionArray != NULL) {
2201 }
2202
2203
2204 void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
2205 if (!initialization_complete) {
2206 return;
2207 }
2208
2209 const char* heapName = get_heapName(heap);
2210 get_HeapStatGlobals(out, heapName);
2211
2212 if ((StatArray == NULL) || (alloc_granules == 0)) {
2213 return;
2214 }
2215 BUFFEREDSTREAM_DECL(ast, out)
2216
2217 unsigned int granules_per_line = 128;
2218 char* low_bound = heap->low_boundary();
2219 CodeBlob* last_blob = NULL;
2220 bool name_in_addr_range = true;
2221 bool have_locks = holding_required_locks();
2222
2223 //---< print at least 128K per block (i.e. between headers) >---
2224 if (granules_per_line*granule_size < 128*K) {
2225 granules_per_line = (unsigned int)((128*K)/granule_size);
2226 }
2227
2228 printBox(ast, '=', "M E T H O D N A M E S for ", heapName);
2229 ast->print_cr(" Method names are dynamically retrieved from the code cache at print time.\n"
2230 " Due to the living nature of the code heap and because the CodeCache_lock\n"
2231 " is not continuously held, the displayed name might be wrong or no name\n"
2232 " might be found at all. The likelihood for that to happen increases\n"
2233 " over time passed between aggregation and print steps.\n");
2234 BUFFEREDSTREAM_FLUSH_LOCKED("")
2235
2236 for (unsigned int ix = 0; ix < alloc_granules; ix++) {
2237 //---< print a new blob on a new line >---
2238 if (ix%granules_per_line == 0) {
2239 if (!name_in_addr_range) {
2240 ast->print_cr("No methods, blobs, or stubs found in this address range");
2241 }
2242 name_in_addr_range = false;
2243
2244 size_t end_ix = (ix+granules_per_line <= alloc_granules) ? ix+granules_per_line : alloc_granules;
2245 ast->cr();
2246 ast->print_cr("--------------------------------------------------------------------");
2247 ast->print_cr("Address range [" INTPTR_FORMAT "," INTPTR_FORMAT "), " SIZE_FORMAT "k", p2i(low_bound+ix*granule_size), p2i(low_bound + end_ix*granule_size), (end_ix - ix)*granule_size/(size_t)K);
2248 ast->print_cr("--------------------------------------------------------------------");
2249 BUFFEREDSTREAM_FLUSH_AUTO("")
2250 }
2251 // Only check granule if it contains at least one blob.
2252 unsigned int nBlobs = StatArray[ix].t1_count + StatArray[ix].t2_count + StatArray[ix].tx_count +
2253 StatArray[ix].stub_count + StatArray[ix].dead_count;
2254 if (nBlobs > 0 ) {
2255 for (unsigned int is = 0; is < granule_size; is+=(unsigned int)seg_size) {
2256 // heap->find_start() is safe. Only works on _segmap.
2257 // Returns NULL or void*. Returned CodeBlob may be uninitialized.
2258 char* this_seg = low_bound + ix*granule_size + is;
2259 CodeBlob* this_blob = (CodeBlob*)(heap->find_start(this_seg));
2260 bool blob_is_safe = blob_access_is_safe(this_blob);
2261 // blob could have been flushed, freed, and merged.
2262 // this_blob < last_blob is an indicator for that.
2263 if (blob_is_safe && (this_blob > last_blob)) {
2264 last_blob = this_blob;
2265
2266 //---< get type and name >---
2267 blobType cbType = noType;
2268 if (segment_granules) {
2269 cbType = (blobType)StatArray[ix].type;
2270 } else {
2271 //---< access these fields only if we own the CodeCache_lock >---
2272 if (have_locks) {
2273 cbType = get_cbType(this_blob);
2274 }
2275 }
2276
2277 //---< access these fields only if we own the CodeCache_lock >---
2278 const char* blob_name = "<unavailable>";
2279 nmethod* nm = NULL;
2280 if (have_locks) {
2281 blob_name = this_blob->name();
2282 nm = this_blob->as_nmethod_or_null();
2283 // this_blob->name() could return NULL if no name was given to CTOR. Inlined, maybe invisible on stack
2284 if (blob_name == NULL) {
2285 blob_name = "<unavailable>";
2286 }
2287 }
2288
2289 //---< print table header for new print range >---
2290 if (!name_in_addr_range) {
2291 name_in_addr_range = true;
2292 ast->fill_to(51);
2293 ast->print("%9s", "compiler");
2294 ast->fill_to(61);
2295 ast->print_cr("%6s", "method");
2296 ast->print_cr("%18s %13s %17s %9s %5s %18s %s", "Addr(module) ", "offset", "size", " type lvl", " temp", "blobType ", "Name");
2297 BUFFEREDSTREAM_FLUSH_AUTO("")
2298 }
2299
2300 //---< print line prefix (address and offset from CodeHeap start) >---
2301 ast->print(INTPTR_FORMAT, p2i(this_blob));
2302 ast->fill_to(19);
2303 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
2304 ast->fill_to(33);
2305
2306 // access nmethod and Method fields only if we own the CodeCache_lock.
2307 // This fact is implicitly transported via nm != NULL.
2308 if (nmethod_access_is_safe(nm)) {
2309 Method* method = nm->method();
2310 ResourceMark rm;
2311 //---< collect all data to locals as quickly as possible >---
2312 unsigned int total_size = nm->total_size();
2313 int hotness = nm->hotness_counter();
2314 bool get_name = (cbType == nMethod_inuse) || (cbType == nMethod_notused);
2315 //---< nMethod size in hex >---
2316 ast->print(PTR32_FORMAT, total_size);
2317 ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
2318 //---< compiler information >---
2319 ast->fill_to(51);
2320 ast->print("%5s %3d", compTypeName[StatArray[ix].compiler], StatArray[ix].level);
2321 //---< method temperature >---
2322 ast->fill_to(62);
2323 ast->print("%5d", hotness);
2324 //---< name and signature >---
2325 ast->fill_to(62+6);
2326 ast->print("%s", blobTypeName[cbType]);
2327 ast->fill_to(82+6);
2328 if (cbType == nMethod_dead) {
2494 ast->print("|");
2495 }
2496 ast->cr();
2497
2498 // can't use BUFFEREDSTREAM_FLUSH_IF("", 512) here.
2499 // can't use this expression. bufferedStream::capacity() does not exist.
2500 // if ((ast->capacity() - ast->size()) < 512) {
2501 // Assume instead that default bufferedStream capacity (4K) was used.
2502 if (ast->size() > 3*K) {
2503 ttyLocker ttyl;
2504 out->print("%s", ast->as_string());
2505 ast->reset();
2506 }
2507
2508 ast->print(INTPTR_FORMAT, p2i(low_bound + ix*granule_size));
2509 ast->fill_to(19);
2510 ast->print("(+" PTR32_FORMAT "): |", (unsigned int)(ix*granule_size));
2511 }
2512 }
2513
2514 // Find out which blob type we have at hand.
2515 // Return "noType" if anything abnormal is detected.
2516 CodeHeapState::blobType CodeHeapState::get_cbType(CodeBlob* cb) {
2517 if (cb != NULL) {
2518 if (cb->is_runtime_stub()) return runtimeStub;
2519 if (cb->is_deoptimization_stub()) return deoptimizationStub;
2520 if (cb->is_uncommon_trap_stub()) return uncommonTrapStub;
2521 if (cb->is_exception_stub()) return exceptionStub;
2522 if (cb->is_safepoint_stub()) return safepointStub;
2523 if (cb->is_adapter_blob()) return adapterBlob;
2524 if (cb->is_method_handles_adapter_blob()) return mh_adapterBlob;
2525 if (cb->is_buffer_blob()) return bufferBlob;
2526
2527 //---< access these fields only if we own CodeCache_lock and Compile_lock >---
2528 // Should be ensured by caller. aggregate() and print_names() do that.
2529 if (holding_required_locks()) {
2530 nmethod* nm = cb->as_nmethod_or_null();
2531 if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb.
2532 if (nm->is_zombie()) return nMethod_dead;
2533 if (nm->is_unloaded()) return nMethod_unloaded;
2534 if (nm->is_in_use()) return nMethod_inuse;
2535 if (nm->is_alive() && !(nm->is_not_entrant())) return nMethod_notused;
2536 if (nm->is_alive()) return nMethod_alive;
2537 return nMethod_dead;
2538 }
2539 }
2540 }
2541 return noType;
2542 }
2543
2544 // make sure the blob at hand is not garbage.
2545 bool CodeHeapState::blob_access_is_safe(CodeBlob* this_blob) {
2546 return (this_blob != NULL) && // a blob must have been found, obviously
2547 (this_blob->header_size() >= 0) &&
2548 (this_blob->relocation_size() >= 0) &&
2549 ((address)this_blob + this_blob->header_size() == (address)(this_blob->relocation_begin())) &&
2550 ((address)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (address)(this_blob->content_begin()));
2551 }
2552
2553 // make sure the nmethod at hand (and the linked method) is not garbage.
2554 bool CodeHeapState::nmethod_access_is_safe(nmethod* nm) {
2555 Method* method = (nm == NULL) ? NULL : nm->method(); // nm->method() was found to be uninitialized, i.e. != NULL, but invalid.
2556 return (nm != NULL) && (method != NULL) && nm->is_alive() && (method->signature() != NULL);
2557 }
2558
2559 bool CodeHeapState::holding_required_locks() {
2560 return SafepointSynchronize::is_at_safepoint() ||
2561 (CodeCache_lock->owned_by_self() && Compile_lock->owned_by_self());
2562 }
|