9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "code/codeHeapState.hpp"
28 #include "compiler/compileBroker.hpp"
29 #include "runtime/sweeper.hpp"
30 #include "utilities/powerOfTwo.hpp"
31
32 // -------------------------
33 // | General Description |
34 // -------------------------
35 // The CodeHeap state analytics are divided in two parts.
36 // The first part examines the entire CodeHeap and aggregates all
37 // information that is believed useful/important.
38 //
39 // Aggregation condenses the information of a piece of the CodeHeap
40 // (4096 bytes by default) into an analysis granule. These granules
41 // contain enough detail to gain initial insight while keeping the
42 // internal structure sizes in check.
43 //
44 // The second part, which consists of several, independent steps,
45 // prints the previously collected information with emphasis on
46 // various aspects.
47 //
48 // The CodeHeap is a living thing. Therefore, protection against concurrent
193 #define BUFFEREDSTREAM_FLUSH(_termString) \
194 if (((_termString) != NULL) && (strlen(_termString) > 0)){\
195 _outbuf->print("%s", _termString); \
196 }
197
198 #define BUFFEREDSTREAM_FLUSH_IF(_termString, _remSize) \
199 BUFFEREDSTREAM_FLUSH(_termString)
200
201 #define BUFFEREDSTREAM_FLUSH_AUTO(_termString) \
202 BUFFEREDSTREAM_FLUSH(_termString)
203
204 #define BUFFEREDSTREAM_FLUSH_LOCKED(_termString) \
205 BUFFEREDSTREAM_FLUSH(_termString)
206
207 #define BUFFEREDSTREAM_FLUSH_STAT()
208 #endif
209 #define HEX32_FORMAT "0x%x" // just a helper format string used below multiple times
210
211 const char blobTypeChar[] = {' ', 'C', 'N', 'I', 'X', 'Z', 'U', 'R', '?', 'D', 'T', 'E', 'S', 'A', 'M', 'B', 'L' };
212 const char* blobTypeName[] = {"noType"
213 , "nMethod (under construction)"
214 , "nMethod (active)"
215 , "nMethod (inactive)"
216 , "nMethod (deopt)"
217 , "nMethod (zombie)"
218 , "nMethod (unloaded)"
219 , "runtime stub"
220 , "ricochet stub"
221 , "deopt stub"
222 , "uncommon trap stub"
223 , "exception stub"
224 , "safepoint stub"
225 , "adapter blob"
226 , "MH adapter blob"
227 , "buffer blob"
228 , "lastType"
229 };
230 const char* compTypeName[] = { "none", "c1", "c2", "jvmci" };
231
232 // Be prepared for ten different CodeHeap segments. Should be enough for a few years.
233 const unsigned int nSizeDistElements = 31; // logarithmic range growth, max size: 2**32
234 const unsigned int maxTopSizeBlocks = 50;
235 const unsigned int tsbStopper = 2 * maxTopSizeBlocks;
236 const unsigned int maxHeaps = 10;
237 static unsigned int nHeaps = 0;
238 static struct CodeHeapStat CodeHeapStatArray[maxHeaps];
239
240 // static struct StatElement *StatArray = NULL;
241 static StatElement* StatArray = NULL;
242 static int log2_seg_size = 0;
243 static size_t seg_size = 0;
244 static size_t alloc_granules = 0;
245 static size_t granule_size = 0;
246 static bool segment_granules = false;
247 static unsigned int nBlocks_t1 = 0; // counting "in_use" nmethods only.
248 static unsigned int nBlocks_t2 = 0; // counting "in_use" nmethods only.
249 static unsigned int nBlocks_alive = 0; // counting "not_used" and "not_entrant" nmethods only.
250 static unsigned int nBlocks_dead = 0; // counting "zombie" and "unloaded" methods only.
251 static unsigned int nBlocks_inconstr = 0; // counting "inconstruction" nmethods only. This is a transient state.
252 static unsigned int nBlocks_unloaded = 0; // counting "unloaded" nmethods only. This is a transient state.
253 static unsigned int nBlocks_stub = 0;
254
255 static struct FreeBlk* FreeArray = NULL;
256 static unsigned int alloc_freeBlocks = 0;
257
258 static struct TopSizeBlk* TopSizeArray = NULL;
259 static unsigned int alloc_topSizeBlocks = 0;
260 static unsigned int used_topSizeBlocks = 0;
261
262 static struct SizeDistributionElement* SizeDistributionArray = NULL;
263
264 // nMethod temperature (hotness) indicators.
265 static int avgTemp = 0;
266 static int maxTemp = 0;
267 static int minTemp = 0;
268
269 static unsigned int latest_compilation_id = 0;
270 static volatile bool initialization_complete = false;
271
302 } else {
303 nHeaps = 1;
304 CodeHeapStatArray[0].heapName = heapName;
305 return 0; // This is the default index if CodeCache is not segmented.
306 }
307 }
308
309 void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName) {
310 unsigned int ix = findHeapIndex(out, heapName);
311 if (ix < maxHeaps) {
312 StatArray = CodeHeapStatArray[ix].StatArray;
313 seg_size = CodeHeapStatArray[ix].segment_size;
314 log2_seg_size = seg_size == 0 ? 0 : exact_log2(seg_size);
315 alloc_granules = CodeHeapStatArray[ix].alloc_granules;
316 granule_size = CodeHeapStatArray[ix].granule_size;
317 segment_granules = CodeHeapStatArray[ix].segment_granules;
318 nBlocks_t1 = CodeHeapStatArray[ix].nBlocks_t1;
319 nBlocks_t2 = CodeHeapStatArray[ix].nBlocks_t2;
320 nBlocks_alive = CodeHeapStatArray[ix].nBlocks_alive;
321 nBlocks_dead = CodeHeapStatArray[ix].nBlocks_dead;
322 nBlocks_inconstr = CodeHeapStatArray[ix].nBlocks_inconstr;
323 nBlocks_unloaded = CodeHeapStatArray[ix].nBlocks_unloaded;
324 nBlocks_stub = CodeHeapStatArray[ix].nBlocks_stub;
325 FreeArray = CodeHeapStatArray[ix].FreeArray;
326 alloc_freeBlocks = CodeHeapStatArray[ix].alloc_freeBlocks;
327 TopSizeArray = CodeHeapStatArray[ix].TopSizeArray;
328 alloc_topSizeBlocks = CodeHeapStatArray[ix].alloc_topSizeBlocks;
329 used_topSizeBlocks = CodeHeapStatArray[ix].used_topSizeBlocks;
330 SizeDistributionArray = CodeHeapStatArray[ix].SizeDistributionArray;
331 avgTemp = CodeHeapStatArray[ix].avgTemp;
332 maxTemp = CodeHeapStatArray[ix].maxTemp;
333 minTemp = CodeHeapStatArray[ix].minTemp;
334 } else {
335 StatArray = NULL;
336 seg_size = 0;
337 log2_seg_size = 0;
338 alloc_granules = 0;
339 granule_size = 0;
340 segment_granules = false;
341 nBlocks_t1 = 0;
342 nBlocks_t2 = 0;
343 nBlocks_alive = 0;
344 nBlocks_dead = 0;
345 nBlocks_inconstr = 0;
346 nBlocks_unloaded = 0;
347 nBlocks_stub = 0;
348 FreeArray = NULL;
349 alloc_freeBlocks = 0;
350 TopSizeArray = NULL;
351 alloc_topSizeBlocks = 0;
352 used_topSizeBlocks = 0;
353 SizeDistributionArray = NULL;
354 avgTemp = 0;
355 maxTemp = 0;
356 minTemp = 0;
357 }
358 }
359
360 void CodeHeapState::set_HeapStatGlobals(outputStream* out, const char* heapName) {
361 unsigned int ix = findHeapIndex(out, heapName);
362 if (ix < maxHeaps) {
363 CodeHeapStatArray[ix].StatArray = StatArray;
364 CodeHeapStatArray[ix].segment_size = seg_size;
365 CodeHeapStatArray[ix].alloc_granules = alloc_granules;
366 CodeHeapStatArray[ix].granule_size = granule_size;
367 CodeHeapStatArray[ix].segment_granules = segment_granules;
368 CodeHeapStatArray[ix].nBlocks_t1 = nBlocks_t1;
369 CodeHeapStatArray[ix].nBlocks_t2 = nBlocks_t2;
370 CodeHeapStatArray[ix].nBlocks_alive = nBlocks_alive;
371 CodeHeapStatArray[ix].nBlocks_dead = nBlocks_dead;
372 CodeHeapStatArray[ix].nBlocks_inconstr = nBlocks_inconstr;
373 CodeHeapStatArray[ix].nBlocks_unloaded = nBlocks_unloaded;
374 CodeHeapStatArray[ix].nBlocks_stub = nBlocks_stub;
375 CodeHeapStatArray[ix].FreeArray = FreeArray;
376 CodeHeapStatArray[ix].alloc_freeBlocks = alloc_freeBlocks;
377 CodeHeapStatArray[ix].TopSizeArray = TopSizeArray;
378 CodeHeapStatArray[ix].alloc_topSizeBlocks = alloc_topSizeBlocks;
379 CodeHeapStatArray[ix].used_topSizeBlocks = used_topSizeBlocks;
380 CodeHeapStatArray[ix].SizeDistributionArray = SizeDistributionArray;
381 CodeHeapStatArray[ix].avgTemp = avgTemp;
382 CodeHeapStatArray[ix].maxTemp = maxTemp;
383 CodeHeapStatArray[ix].minTemp = minTemp;
384 }
385 }
386
387 //---< get a new statistics array >---
388 void CodeHeapState::prepare_StatArray(outputStream* out, size_t nElem, size_t granularity, const char* heapName) {
389 if (StatArray == NULL) {
390 StatArray = new StatElement[nElem];
391 //---< reset some counts >---
392 alloc_granules = nElem;
479
480 void CodeHeapState::discard_StatArray(outputStream* out) {
481 if (StatArray != NULL) {
482 delete StatArray;
483 StatArray = NULL;
484 alloc_granules = 0;
485 granule_size = 0;
486 }
487 }
488
489 void CodeHeapState::discard_FreeArray(outputStream* out) {
490 if (FreeArray != NULL) {
491 delete[] FreeArray;
492 FreeArray = NULL;
493 alloc_freeBlocks = 0;
494 }
495 }
496
497 void CodeHeapState::discard_TopSizeArray(outputStream* out) {
498 if (TopSizeArray != NULL) {
499 delete[] TopSizeArray;
500 TopSizeArray = NULL;
501 alloc_topSizeBlocks = 0;
502 used_topSizeBlocks = 0;
503 }
504 }
505
506 void CodeHeapState::discard_SizeDistArray(outputStream* out) {
507 if (SizeDistributionArray != NULL) {
508 delete[] SizeDistributionArray;
509 SizeDistributionArray = NULL;
510 }
511 }
512
513 // Discard all allocated internal data structures.
514 // This should be done after an analysis session is completed.
515 void CodeHeapState::discard(outputStream* out, CodeHeap* heap) {
516 if (!initialization_complete) {
517 return;
518 }
572 BUFFEREDSTREAM_FLUSH("")
573 }
574 get_HeapStatGlobals(out, heapName);
575
576
577 // Since we are (and must be) analyzing the CodeHeap contents under the CodeCache_lock,
578 // all heap information is "constant" and can be safely extracted/calculated before we
579 // enter the while() loop. Actually, the loop will only be iterated once.
580 char* low_bound = heap->low_boundary();
581 size_t size = heap->capacity();
582 size_t res_size = heap->max_capacity();
583 seg_size = heap->segment_size();
584 log2_seg_size = seg_size == 0 ? 0 : exact_log2(seg_size); // This is a global static value.
585
586 if (seg_size == 0) {
587 printBox(ast, '-', "Heap not fully initialized yet, segment size is zero for segment ", heapName);
588 BUFFEREDSTREAM_FLUSH("")
589 return;
590 }
591
592 if (!CodeCache_lock->owned_by_self()) {
593 printBox(ast, '-', "aggregate function called without holding the CodeCache_lock for ", heapName);
594 BUFFEREDSTREAM_FLUSH("")
595 return;
596 }
597
598 // Calculate granularity of analysis (and output).
599 // The CodeHeap is managed (allocated) in segments (units) of CodeCacheSegmentSize.
600 // The CodeHeap can become fairly large, in particular in productive real-life systems.
601 //
602 // It is often neither feasible nor desirable to aggregate the data with the highest possible
603 // level of detail, i.e. inspecting and printing each segment on its own.
604 //
605 // The granularity parameter allows to specify the level of detail available in the analysis.
606 // It must be a positive multiple of the segment size and should be selected such that enough
607 // detail is provided while, at the same time, the printed output does not explode.
608 //
609 // By manipulating the granularity value, we enforce that at least min_granules units
610 // of analysis are available. We also enforce an upper limit of max_granules units to
611 // keep the amount of allocated storage in check.
612 //
613 // Finally, we adjust the granularity such that each granule covers at most 64k-1 segments.
640 " Subsequent print functions create their output based on this snapshot.\n"
641 " The CodeHeap is a living thing, and every effort has been made for the\n"
642 " collected data to be consistent. Only the method names and signatures\n"
643 " are retrieved at print time. That may lead to rare cases where the\n"
644 " name of a method is no longer available, e.g. because it was unloaded.\n");
645 ast->print_cr(" CodeHeap committed size " SIZE_FORMAT "K (" SIZE_FORMAT "M), reserved size " SIZE_FORMAT "K (" SIZE_FORMAT "M), %d%% occupied.",
646 size/(size_t)K, size/(size_t)M, res_size/(size_t)K, res_size/(size_t)M, (unsigned int)(100.0*size/res_size));
647 ast->print_cr(" CodeHeap allocation segment size is " SIZE_FORMAT " bytes. This is the smallest possible granularity.", seg_size);
648 ast->print_cr(" CodeHeap (committed part) is mapped to " SIZE_FORMAT " granules of size " SIZE_FORMAT " bytes.", granules, granularity);
649 ast->print_cr(" Each granule takes " SIZE_FORMAT " bytes of C heap, that is " SIZE_FORMAT "K in total for statistics data.", sizeof(StatElement), (sizeof(StatElement)*granules)/(size_t)K);
650 ast->print_cr(" The number of granules is limited to %dk, requiring a granules size of at least %d bytes for a 1GB heap.", (unsigned int)(max_granules/K), (unsigned int)(G/max_granules));
651 BUFFEREDSTREAM_FLUSH("\n")
652
653
654 while (!done) {
655 //---< reset counters with every aggregation >---
656 nBlocks_t1 = 0;
657 nBlocks_t2 = 0;
658 nBlocks_alive = 0;
659 nBlocks_dead = 0;
660 nBlocks_inconstr = 0;
661 nBlocks_unloaded = 0;
662 nBlocks_stub = 0;
663
664 nBlocks_free = 0;
665 nBlocks_used = 0;
666 nBlocks_zomb = 0;
667 nBlocks_disconn = 0;
668 nBlocks_notentr = 0;
669
670 //---< discard old arrays if size does not match >---
671 if (granules != alloc_granules) {
672 discard_StatArray(out);
673 discard_TopSizeArray(out);
674 }
675
676 //---< allocate arrays if they don't yet exist, initialize >---
677 prepare_StatArray(out, granules, granularity, heapName);
678 if (StatArray == NULL) {
679 set_HeapStatGlobals(out, heapName);
680 return;
681 }
682 prepare_TopSizeArray(out, maxTopSizeBlocks, heapName);
683 prepare_SizeDistArray(out, nSizeDistElements, heapName);
684
685 latest_compilation_id = CompileBroker::get_compilation_id();
686 unsigned int highest_compilation_id = 0;
687 size_t usedSpace = 0;
688 size_t t1Space = 0;
689 size_t t2Space = 0;
690 size_t aliveSpace = 0;
691 size_t disconnSpace = 0;
692 size_t notentrSpace = 0;
693 size_t deadSpace = 0;
694 size_t inconstrSpace = 0;
695 size_t unloadedSpace = 0;
696 size_t stubSpace = 0;
697 size_t freeSpace = 0;
698 size_t maxFreeSize = 0;
699 HeapBlock* maxFreeBlock = NULL;
700 bool insane = false;
701
702 int64_t hotnessAccumulator = 0;
703 unsigned int n_methods = 0;
704 avgTemp = 0;
705 minTemp = (int)(res_size > M ? (res_size/M)*2 : 1);
706 maxTemp = -minTemp;
707
708 for (HeapBlock *h = heap->first_block(); h != NULL && !insane; h = heap->next_block(h)) {
709 unsigned int hb_len = (unsigned int)h->length(); // despite being size_t, length can never overflow an unsigned int.
710 size_t hb_bytelen = ((size_t)hb_len)<<log2_seg_size;
711 unsigned int ix_beg = (unsigned int)(((char*)h-low_bound)/granule_size);
712 unsigned int ix_end = (unsigned int)(((char*)h-low_bound+(hb_bytelen-1))/granule_size);
713 unsigned int compile_id = 0;
714 CompLevel comp_lvl = CompLevel_none;
736 if (ix_beg > ix_end) {
737 insane = true; ast->print_cr("Sanity check: end index (%d) lower than begin index (%d)", ix_end, ix_beg);
738 }
739 if (insane) {
740 BUFFEREDSTREAM_FLUSH("")
741 continue;
742 }
743
744 if (h->free()) {
745 nBlocks_free++;
746 freeSpace += hb_bytelen;
747 if (hb_bytelen > maxFreeSize) {
748 maxFreeSize = hb_bytelen;
749 maxFreeBlock = h;
750 }
751 } else {
752 update_SizeDistArray(out, hb_len);
753 nBlocks_used++;
754 usedSpace += hb_bytelen;
755 CodeBlob* cb = (CodeBlob*)heap->find_start(h);
756 if (cb != NULL) {
757 cbType = get_cbType(cb);
758 if (cb->is_nmethod()) {
759 compile_id = ((nmethod*)cb)->compile_id();
760 comp_lvl = (CompLevel)((nmethod*)cb)->comp_level();
761 if (((nmethod*)cb)->is_compiled_by_c1()) {
762 cType = c1;
763 }
764 if (((nmethod*)cb)->is_compiled_by_c2()) {
765 cType = c2;
766 }
767 if (((nmethod*)cb)->is_compiled_by_jvmci()) {
768 cType = jvmci;
769 }
770 switch (cbType) {
771 case nMethod_inuse: { // only for executable methods!!!
772 // space for these cbs is accounted for later.
773 int temperature = ((nmethod*)cb)->hotness_counter();
774 hotnessAccumulator += temperature;
775 n_methods++;
776 maxTemp = (temperature > maxTemp) ? temperature : maxTemp;
777 minTemp = (temperature < minTemp) ? temperature : minTemp;
778 break;
779 }
780 case nMethod_notused:
781 nBlocks_alive++;
782 nBlocks_disconn++;
783 aliveSpace += hb_bytelen;
784 disconnSpace += hb_bytelen;
785 break;
786 case nMethod_notentrant: // equivalent to nMethod_alive
787 nBlocks_alive++;
788 nBlocks_notentr++;
789 aliveSpace += hb_bytelen;
790 notentrSpace += hb_bytelen;
791 break;
792 case nMethod_unloaded:
793 nBlocks_unloaded++;
794 unloadedSpace += hb_bytelen;
795 break;
796 case nMethod_dead:
797 nBlocks_dead++;
798 deadSpace += hb_bytelen;
799 break;
800 case nMethod_inconstruction:
801 nBlocks_inconstr++;
802 inconstrSpace += hb_bytelen;
803 break;
804 default:
805 break;
806 }
807 }
808
809 //------------------------------------------
810 //---< register block in TopSizeArray >---
811 //------------------------------------------
812 if (alloc_topSizeBlocks > 0) {
813 if (used_topSizeBlocks == 0) {
814 TopSizeArray[0].start = h;
815 TopSizeArray[0].len = hb_len;
816 TopSizeArray[0].index = tsbStopper;
817 TopSizeArray[0].compiler = cType;
818 TopSizeArray[0].level = comp_lvl;
819 TopSizeArray[0].type = cbType;
820 currMax = hb_len;
821 currMin = hb_len;
822 currMin_ix = 0;
823 used_topSizeBlocks++;
824 // This check roughly cuts 5000 iterations (JVM98, mixed, dbg, termination stats):
825 } else if ((used_topSizeBlocks < alloc_topSizeBlocks) && (hb_len < currMin)) {
826 //---< all blocks in list are larger, but there is room left in array >---
827 TopSizeArray[currMin_ix].index = used_topSizeBlocks;
828 TopSizeArray[used_topSizeBlocks].start = h;
829 TopSizeArray[used_topSizeBlocks].len = hb_len;
830 TopSizeArray[used_topSizeBlocks].index = tsbStopper;
831 TopSizeArray[used_topSizeBlocks].compiler = cType;
832 TopSizeArray[used_topSizeBlocks].level = comp_lvl;
833 TopSizeArray[used_topSizeBlocks].type = cbType;
834 currMin = hb_len;
835 currMin_ix = used_topSizeBlocks;
836 used_topSizeBlocks++;
837 } else {
838 // This check cuts total_iterations by a factor of 6 (JVM98, mixed, dbg, termination stats):
839 // We don't need to search the list if we know beforehand that the current block size is
840 // smaller than the currently recorded minimum and there is no free entry left in the list.
841 if (!((used_topSizeBlocks == alloc_topSizeBlocks) && (hb_len <= currMin))) {
842 if (currMax < hb_len) {
843 currMax = hb_len;
844 }
845 unsigned int i;
846 unsigned int prev_i = tsbStopper;
847 unsigned int limit_i = 0;
848 for (i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
849 if (limit_i++ >= alloc_topSizeBlocks) {
850 insane = true; break; // emergency exit
851 }
852 if (i >= used_topSizeBlocks) {
853 insane = true; break; // emergency exit
854 }
855 total_iterations++;
856 if (TopSizeArray[i].len < hb_len) {
857 //---< We want to insert here, element <i> is smaller than the current one >---
858 if (used_topSizeBlocks < alloc_topSizeBlocks) { // still room for a new entry to insert
859 // old entry gets moved to the next free element of the array.
860 // That's necessary to keep the entry for the largest block at index 0.
861 // This move might cause the current minimum to be moved to another place
862 if (i == currMin_ix) {
863 assert(TopSizeArray[i].len == currMin, "sort error");
864 currMin_ix = used_topSizeBlocks;
865 }
866 memcpy((void*)&TopSizeArray[used_topSizeBlocks], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
867 TopSizeArray[i].start = h;
868 TopSizeArray[i].len = hb_len;
869 TopSizeArray[i].index = used_topSizeBlocks;
870 TopSizeArray[i].compiler = cType;
871 TopSizeArray[i].level = comp_lvl;
872 TopSizeArray[i].type = cbType;
873 used_topSizeBlocks++;
874 } else { // no room for new entries, current block replaces entry for smallest block
875 //---< Find last entry (entry for smallest remembered block) >---
876 unsigned int j = i;
877 unsigned int prev_j = tsbStopper;
878 unsigned int limit_j = 0;
879 while (TopSizeArray[j].index != tsbStopper) {
880 if (limit_j++ >= alloc_topSizeBlocks) {
881 insane = true; break; // emergency exit
882 }
883 if (j >= used_topSizeBlocks) {
884 insane = true; break; // emergency exit
885 }
886 total_iterations++;
887 prev_j = j;
888 j = TopSizeArray[j].index;
889 }
890 if (!insane) {
891 if (prev_j == tsbStopper) {
892 //---< Above while loop did not iterate, we already are the min entry >---
893 //---< We have to just replace the smallest entry >---
894 currMin = hb_len;
895 currMin_ix = j;
896 TopSizeArray[j].start = h;
897 TopSizeArray[j].len = hb_len;
898 TopSizeArray[j].index = tsbStopper; // already set!!
899 TopSizeArray[j].compiler = cType;
900 TopSizeArray[j].level = comp_lvl;
901 TopSizeArray[j].type = cbType;
902 } else {
903 //---< second-smallest entry is now smallest >---
904 TopSizeArray[prev_j].index = tsbStopper;
905 currMin = TopSizeArray[prev_j].len;
906 currMin_ix = prev_j;
907 //---< smallest entry gets overwritten >---
908 memcpy((void*)&TopSizeArray[j], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
909 TopSizeArray[i].start = h;
910 TopSizeArray[i].len = hb_len;
911 TopSizeArray[i].index = j;
912 TopSizeArray[i].compiler = cType;
913 TopSizeArray[i].level = comp_lvl;
914 TopSizeArray[i].type = cbType;
915 }
916 } // insane
917 }
918 break;
919 }
920 prev_i = i;
921 }
922 if (insane) {
923 // Note: regular analysis could probably continue by resetting "insane" flag.
924 out->print_cr("Possible loop in TopSizeBlocks list detected. Analysis aborted.");
925 discard_TopSizeArray(out);
926 }
927 }
928 }
929 }
930 //----------------------------------------------
931 //---< END register block in TopSizeArray >---
932 //----------------------------------------------
933 } else {
934 nBlocks_zomb++;
935 }
936
937 if (ix_beg == ix_end) {
938 StatArray[ix_beg].type = cbType;
939 switch (cbType) {
940 case nMethod_inuse:
941 highest_compilation_id = (highest_compilation_id >= compile_id) ? highest_compilation_id : compile_id;
942 if (comp_lvl < CompLevel_full_optimization) {
943 nBlocks_t1++;
944 t1Space += hb_bytelen;
945 StatArray[ix_beg].t1_count++;
946 StatArray[ix_beg].t1_space += (unsigned short)hb_len;
947 StatArray[ix_beg].t1_age = StatArray[ix_beg].t1_age < compile_id ? compile_id : StatArray[ix_beg].t1_age;
948 } else {
949 nBlocks_t2++;
950 t2Space += hb_bytelen;
951 StatArray[ix_beg].t2_count++;
952 StatArray[ix_beg].t2_space += (unsigned short)hb_len;
953 StatArray[ix_beg].t2_age = StatArray[ix_beg].t2_age < compile_id ? compile_id : StatArray[ix_beg].t2_age;
954 }
955 StatArray[ix_beg].level = comp_lvl;
956 StatArray[ix_beg].compiler = cType;
957 break;
958 case nMethod_inconstruction: // let's count "in construction" nmethods here.
959 case nMethod_alive:
960 StatArray[ix_beg].tx_count++;
961 StatArray[ix_beg].tx_space += (unsigned short)hb_len;
962 StatArray[ix_beg].tx_age = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
963 StatArray[ix_beg].level = comp_lvl;
964 StatArray[ix_beg].compiler = cType;
965 break;
966 case nMethod_dead:
967 case nMethod_unloaded:
968 StatArray[ix_beg].dead_count++;
969 StatArray[ix_beg].dead_space += (unsigned short)hb_len;
970 break;
971 default:
972 // must be a stub, if it's not a dead or alive nMethod
973 nBlocks_stub++;
974 stubSpace += hb_bytelen;
975 StatArray[ix_beg].stub_count++;
976 StatArray[ix_beg].stub_space += (unsigned short)hb_len;
977 break;
978 }
995
996 StatArray[ix_end].t1_count++;
997 StatArray[ix_end].t1_space += (unsigned short)end_space;
998 StatArray[ix_end].t1_age = StatArray[ix_end].t1_age < compile_id ? compile_id : StatArray[ix_end].t1_age;
999 } else {
1000 nBlocks_t2++;
1001 t2Space += hb_bytelen;
1002 StatArray[ix_beg].t2_count++;
1003 StatArray[ix_beg].t2_space += (unsigned short)beg_space;
1004 StatArray[ix_beg].t2_age = StatArray[ix_beg].t2_age < compile_id ? compile_id : StatArray[ix_beg].t2_age;
1005
1006 StatArray[ix_end].t2_count++;
1007 StatArray[ix_end].t2_space += (unsigned short)end_space;
1008 StatArray[ix_end].t2_age = StatArray[ix_end].t2_age < compile_id ? compile_id : StatArray[ix_end].t2_age;
1009 }
1010 StatArray[ix_beg].level = comp_lvl;
1011 StatArray[ix_beg].compiler = cType;
1012 StatArray[ix_end].level = comp_lvl;
1013 StatArray[ix_end].compiler = cType;
1014 break;
1015 case nMethod_inconstruction: // let's count "in construction" nmethods here.
1016 case nMethod_alive:
1017 StatArray[ix_beg].tx_count++;
1018 StatArray[ix_beg].tx_space += (unsigned short)beg_space;
1019 StatArray[ix_beg].tx_age = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
1020
1021 StatArray[ix_end].tx_count++;
1022 StatArray[ix_end].tx_space += (unsigned short)end_space;
1023 StatArray[ix_end].tx_age = StatArray[ix_end].tx_age < compile_id ? compile_id : StatArray[ix_end].tx_age;
1024
1025 StatArray[ix_beg].level = comp_lvl;
1026 StatArray[ix_beg].compiler = cType;
1027 StatArray[ix_end].level = comp_lvl;
1028 StatArray[ix_end].compiler = cType;
1029 break;
1030 case nMethod_dead:
1031 case nMethod_unloaded:
1032 StatArray[ix_beg].dead_count++;
1033 StatArray[ix_beg].dead_space += (unsigned short)beg_space;
1034 StatArray[ix_end].dead_count++;
1035 StatArray[ix_end].dead_space += (unsigned short)end_space;
1043 StatArray[ix_end].stub_count++;
1044 StatArray[ix_end].stub_space += (unsigned short)end_space;
1045 break;
1046 }
1047 for (unsigned int ix = ix_beg+1; ix < ix_end; ix++) {
1048 StatArray[ix].type = cbType;
1049 switch (cbType) {
1050 case nMethod_inuse:
1051 if (comp_lvl < CompLevel_full_optimization) {
1052 StatArray[ix].t1_count++;
1053 StatArray[ix].t1_space += (unsigned short)(granule_size>>log2_seg_size);
1054 StatArray[ix].t1_age = StatArray[ix].t1_age < compile_id ? compile_id : StatArray[ix].t1_age;
1055 } else {
1056 StatArray[ix].t2_count++;
1057 StatArray[ix].t2_space += (unsigned short)(granule_size>>log2_seg_size);
1058 StatArray[ix].t2_age = StatArray[ix].t2_age < compile_id ? compile_id : StatArray[ix].t2_age;
1059 }
1060 StatArray[ix].level = comp_lvl;
1061 StatArray[ix].compiler = cType;
1062 break;
1063 case nMethod_inconstruction: // let's count "in construction" nmethods here.
1064 case nMethod_alive:
1065 StatArray[ix].tx_count++;
1066 StatArray[ix].tx_space += (unsigned short)(granule_size>>log2_seg_size);
1067 StatArray[ix].tx_age = StatArray[ix].tx_age < compile_id ? compile_id : StatArray[ix].tx_age;
1068 StatArray[ix].level = comp_lvl;
1069 StatArray[ix].compiler = cType;
1070 break;
1071 case nMethod_dead:
1072 case nMethod_unloaded:
1073 StatArray[ix].dead_count++;
1074 StatArray[ix].dead_space += (unsigned short)(granule_size>>log2_seg_size);
1075 break;
1076 default:
1077 // must be a stub, if it's not a dead or alive nMethod
1078 StatArray[ix].stub_count++;
1079 StatArray[ix].stub_space += (unsigned short)(granule_size>>log2_seg_size);
1080 break;
1081 }
1082 }
1083 }
1084 }
1085 }
1086 done = true;
1087
1088 if (!insane) {
1089 // There is a risk for this block (because it contains many print statements) to get
1090 // interspersed with print data from other threads. We take this risk intentionally.
1091 // Getting stalled waiting for tty_lock while holding the CodeCache_lock is not desirable.
1092 printBox(ast, '-', "Global CodeHeap statistics for segment ", heapName);
1093 ast->print_cr("freeSpace = " SIZE_FORMAT_W(8) "k, nBlocks_free = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", freeSpace/(size_t)K, nBlocks_free, (100.0*freeSpace)/size, (100.0*freeSpace)/res_size);
1094 ast->print_cr("usedSpace = " SIZE_FORMAT_W(8) "k, nBlocks_used = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", usedSpace/(size_t)K, nBlocks_used, (100.0*usedSpace)/size, (100.0*usedSpace)/res_size);
1095 ast->print_cr(" Tier1 Space = " SIZE_FORMAT_W(8) "k, nBlocks_t1 = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", t1Space/(size_t)K, nBlocks_t1, (100.0*t1Space)/size, (100.0*t1Space)/res_size);
1096 ast->print_cr(" Tier2 Space = " SIZE_FORMAT_W(8) "k, nBlocks_t2 = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", t2Space/(size_t)K, nBlocks_t2, (100.0*t2Space)/size, (100.0*t2Space)/res_size);
1097 ast->print_cr(" Alive Space = " SIZE_FORMAT_W(8) "k, nBlocks_alive = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", aliveSpace/(size_t)K, nBlocks_alive, (100.0*aliveSpace)/size, (100.0*aliveSpace)/res_size);
1098 ast->print_cr(" disconnected = " SIZE_FORMAT_W(8) "k, nBlocks_disconn = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", disconnSpace/(size_t)K, nBlocks_disconn, (100.0*disconnSpace)/size, (100.0*disconnSpace)/res_size);
1099 ast->print_cr(" not entrant = " SIZE_FORMAT_W(8) "k, nBlocks_notentr = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", notentrSpace/(size_t)K, nBlocks_notentr, (100.0*notentrSpace)/size, (100.0*notentrSpace)/res_size);
1100 ast->print_cr(" inconstrSpace = " SIZE_FORMAT_W(8) "k, nBlocks_inconstr = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", inconstrSpace/(size_t)K, nBlocks_inconstr, (100.0*inconstrSpace)/size, (100.0*inconstrSpace)/res_size);
1101 ast->print_cr(" unloadedSpace = " SIZE_FORMAT_W(8) "k, nBlocks_unloaded = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", unloadedSpace/(size_t)K, nBlocks_unloaded, (100.0*unloadedSpace)/size, (100.0*unloadedSpace)/res_size);
1102 ast->print_cr(" deadSpace = " SIZE_FORMAT_W(8) "k, nBlocks_dead = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", deadSpace/(size_t)K, nBlocks_dead, (100.0*deadSpace)/size, (100.0*deadSpace)/res_size);
1103 ast->print_cr(" stubSpace = " SIZE_FORMAT_W(8) "k, nBlocks_stub = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", stubSpace/(size_t)K, nBlocks_stub, (100.0*stubSpace)/size, (100.0*stubSpace)/res_size);
1104 ast->print_cr("ZombieBlocks = %8d. These are HeapBlocks which could not be identified as CodeBlobs.", nBlocks_zomb);
1105 ast->cr();
1106 ast->print_cr("Segment start = " INTPTR_FORMAT ", used space = " SIZE_FORMAT_W(8)"k", p2i(low_bound), size/K);
1107 ast->print_cr("Segment end (used) = " INTPTR_FORMAT ", remaining space = " SIZE_FORMAT_W(8)"k", p2i(low_bound) + size, (res_size - size)/K);
1108 ast->print_cr("Segment end (reserved) = " INTPTR_FORMAT ", reserved space = " SIZE_FORMAT_W(8)"k", p2i(low_bound) + res_size, res_size/K);
1109 ast->cr();
1110 ast->print_cr("latest allocated compilation id = %d", latest_compilation_id);
1111 ast->print_cr("highest observed compilation id = %d", highest_compilation_id);
1112 ast->print_cr("Building TopSizeList iterations = %ld", total_iterations);
1113 ast->cr();
1114
1115 int reset_val = NMethodSweeper::hotness_counter_reset_val();
1116 double reverse_free_ratio = (res_size > size) ? (double)res_size/(double)(res_size-size) : (double)res_size;
1117 printBox(ast, '-', "Method hotness information at time of this analysis", NULL);
1118 ast->print_cr("Highest possible method temperature: %12d", reset_val);
1119 ast->print_cr("Threshold for method to be considered 'cold': %12.3f", -reset_val + reverse_free_ratio * NmethodSweepActivity);
1120 if (n_methods > 0) {
1258 ast->print_cr("Free block count mismatch could not be resolved.");
1259 ast->print_cr("Try to run \"aggregate\" function to update counters");
1260 }
1261 BUFFEREDSTREAM_FLUSH("")
1262
1263 //---< discard old array and update global values >---
1264 discard_FreeArray(out);
1265 set_HeapStatGlobals(out, heapName);
1266 return;
1267 }
1268
1269 //---< calculate and fill remaining fields >---
1270 if (FreeArray != NULL) {
1271 // This loop is intentionally printing directly to "out".
1272 // It should not print anything, anyway.
1273 for (unsigned int ix = 0; ix < alloc_freeBlocks-1; ix++) {
1274 size_t lenSum = 0;
1275 FreeArray[ix].gap = (unsigned int)((address)FreeArray[ix+1].start - ((address)FreeArray[ix].start + FreeArray[ix].len));
1276 for (HeapBlock *h = heap->next_block(FreeArray[ix].start); (h != NULL) && (h != FreeArray[ix+1].start); h = heap->next_block(h)) {
1277 CodeBlob *cb = (CodeBlob*)(heap->find_start(h));
1278 if ((cb != NULL) && !cb->is_nmethod()) {
1279 FreeArray[ix].stubs_in_gap = true;
1280 }
1281 FreeArray[ix].n_gapBlocks++;
1282 lenSum += h->length()<<log2_seg_size;
1283 if (((address)h < ((address)FreeArray[ix].start+FreeArray[ix].len)) || (h >= FreeArray[ix+1].start)) {
1284 out->print_cr("unsorted occupied CodeHeap block found @ %p, gap interval [%p, %p)", h, (address)FreeArray[ix].start+FreeArray[ix].len, FreeArray[ix+1].start);
1285 }
1286 }
1287 if (lenSum != FreeArray[ix].gap) {
1288 out->print_cr("Length mismatch for gap between FreeBlk[%d] and FreeBlk[%d]. Calculated: %d, accumulated: %d.", ix, ix+1, FreeArray[ix].gap, (unsigned int)lenSum);
1289 }
1290 }
1291 }
1292 set_HeapStatGlobals(out, heapName);
1293
1294 printBox(ast, '=', "C O D E H E A P A N A L Y S I S C O M P L E T E for segment ", heapName);
1295 BUFFEREDSTREAM_FLUSH("\n")
1296 }
1297
1298
1310 BUFFEREDSTREAM_DECL(ast, out)
1311
1312 {
1313 printBox(ast, '=', "U S E D S P A C E S T A T I S T I C S for ", heapName);
1314 ast->print_cr("Note: The Top%d list of the largest used blocks associates method names\n"
1315 " and other identifying information with the block size data.\n"
1316 "\n"
1317 " Method names are dynamically retrieved from the code cache at print time.\n"
1318 " Due to the living nature of the code cache and because the CodeCache_lock\n"
1319 " is not continuously held, the displayed name might be wrong or no name\n"
1320 " might be found at all. The likelihood for that to happen increases\n"
1321 " over time passed between analysis and print step.\n", used_topSizeBlocks);
1322 BUFFEREDSTREAM_FLUSH_LOCKED("\n")
1323 }
1324
1325 //----------------------------
1326 //-- Print Top Used Blocks --
1327 //----------------------------
1328 {
1329 char* low_bound = heap->low_boundary();
1330 bool have_CodeCache_lock = CodeCache_lock->owned_by_self();
1331
1332 printBox(ast, '-', "Largest Used Blocks in ", heapName);
1333 print_blobType_legend(ast);
1334
1335 ast->fill_to(51);
1336 ast->print("%4s", "blob");
1337 ast->fill_to(56);
1338 ast->print("%9s", "compiler");
1339 ast->fill_to(66);
1340 ast->print_cr("%6s", "method");
1341 ast->print_cr("%18s %13s %17s %4s %9s %5s %s", "Addr(module) ", "offset", "size", "type", " type lvl", " temp", "Name");
1342 BUFFEREDSTREAM_FLUSH_LOCKED("")
1343
1344 //---< print Top Ten Used Blocks >---
1345 if (used_topSizeBlocks > 0) {
1346 unsigned int printed_topSizeBlocks = 0;
1347 for (unsigned int i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
1348 printed_topSizeBlocks++;
1349 nmethod* nm = NULL;
1350 const char* blob_name = "unnamed blob or blob name unavailable";
1351 // heap->find_start() is safe. Only works on _segmap.
1352 // Returns NULL or void*. Returned CodeBlob may be uninitialized.
1353 HeapBlock* heapBlock = TopSizeArray[i].start;
1354 CodeBlob* this_blob = (CodeBlob*)(heap->find_start(heapBlock));
1355 bool blob_is_safe = blob_access_is_safe(this_blob, NULL);
1356 if (blob_is_safe) {
1357 //---< access these fields only if we own the CodeCache_lock >---
1358 if (have_CodeCache_lock) {
1359 blob_name = this_blob->name();
1360 nm = this_blob->as_nmethod_or_null();
1361 }
1362 //---< blob address >---
1363 ast->print(INTPTR_FORMAT, p2i(this_blob));
1364 ast->fill_to(19);
1365 //---< blob offset from CodeHeap begin >---
1366 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
1367 ast->fill_to(33);
1368 } else {
1369 //---< block address >---
1370 ast->print(INTPTR_FORMAT, p2i(TopSizeArray[i].start));
1371 ast->fill_to(19);
1372 //---< block offset from CodeHeap begin >---
1373 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)TopSizeArray[i].start-low_bound));
1374 ast->fill_to(33);
1375 }
1376
1377 //---< print size, name, and signature (for nMethods) >---
1378 // access nmethod and Method fields only if we own the CodeCache_lock.
1379 // This fact is implicitly transported via nm != NULL.
1380 if (CompiledMethod::nmethod_access_is_safe(nm)) {
1381 ResourceMark rm;
1382 Method* method = nm->method();
1383 if (nm->is_in_use()) {
1384 blob_name = method->name_and_sig_as_C_string();
1385 }
1386 if (nm->is_not_entrant()) {
1387 blob_name = method->name_and_sig_as_C_string();
1388 }
1389 //---< nMethod size in hex >---
1390 unsigned int total_size = nm->total_size();
1391 ast->print(PTR32_FORMAT, total_size);
1392 ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
1393 ast->fill_to(51);
1394 ast->print(" %c", blobTypeChar[TopSizeArray[i].type]);
1395 //---< compiler information >---
1396 ast->fill_to(56);
1397 ast->print("%5s %3d", compTypeName[TopSizeArray[i].compiler], TopSizeArray[i].level);
1398 //---< method temperature >---
1399 ast->fill_to(67);
1400 ast->print("%5d", nm->hotness_counter());
1401 //---< name and signature >---
1402 ast->fill_to(67+6);
1403 if (nm->is_not_installed()) {
1404 ast->print(" not (yet) installed method ");
1405 }
1406 if (nm->is_zombie()) {
1407 ast->print(" zombie method ");
1408 }
1409 ast->print("%s", blob_name);
1410 } else {
1411 //---< block size in hex >---
1412 ast->print(PTR32_FORMAT, (unsigned int)(TopSizeArray[i].len<<log2_seg_size));
1413 ast->print("(" SIZE_FORMAT_W(4) "K)", (TopSizeArray[i].len<<log2_seg_size)/K);
1414 //---< no compiler information >---
1415 ast->fill_to(56);
1416 //---< name and signature >---
1417 ast->fill_to(67+6);
1418 ast->print("%s", blob_name);
1419 }
1420 ast->cr();
1421 BUFFEREDSTREAM_FLUSH_AUTO("")
1422 }
1423 if (used_topSizeBlocks != printed_topSizeBlocks) {
1424 ast->print_cr("used blocks: %d, printed blocks: %d", used_topSizeBlocks, printed_topSizeBlocks);
1425 for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
1426 ast->print_cr(" TopSizeArray[%d].index = %d, len = %d", i, TopSizeArray[i].index, TopSizeArray[i].len);
1427 BUFFEREDSTREAM_FLUSH_AUTO("")
1428 }
1429 }
1430 BUFFEREDSTREAM_FLUSH("\n\n")
1431 }
1432 }
1433
1434 //-----------------------------
1435 //-- Print Usage Histogram --
1436 //-----------------------------
1437
1438 if (SizeDistributionArray != NULL) {
2179 }
2180
2181
2182 void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
2183 if (!initialization_complete) {
2184 return;
2185 }
2186
2187 const char* heapName = get_heapName(heap);
2188 get_HeapStatGlobals(out, heapName);
2189
2190 if ((StatArray == NULL) || (alloc_granules == 0)) {
2191 return;
2192 }
2193 BUFFEREDSTREAM_DECL(ast, out)
2194
2195 unsigned int granules_per_line = 128;
2196 char* low_bound = heap->low_boundary();
2197 CodeBlob* last_blob = NULL;
2198 bool name_in_addr_range = true;
2199 bool have_CodeCache_lock = CodeCache_lock->owned_by_self();
2200
2201 //---< print at least 128K per block (i.e. between headers) >---
2202 if (granules_per_line*granule_size < 128*K) {
2203 granules_per_line = (unsigned int)((128*K)/granule_size);
2204 }
2205
2206 printBox(ast, '=', "M E T H O D N A M E S for ", heapName);
2207 ast->print_cr(" Method names are dynamically retrieved from the code cache at print time.\n"
2208 " Due to the living nature of the code heap and because the CodeCache_lock\n"
2209 " is not continuously held, the displayed name might be wrong or no name\n"
2210 " might be found at all. The likelihood for that to happen increases\n"
2211 " over time passed between aggregtion and print steps.\n");
2212 BUFFEREDSTREAM_FLUSH_LOCKED("")
2213
2214 for (unsigned int ix = 0; ix < alloc_granules; ix++) {
2215 //---< print a new blob on a new line >---
2216 if (ix%granules_per_line == 0) {
2217 if (!name_in_addr_range) {
2218 ast->print_cr("No methods, blobs, or stubs found in this address range");
2219 }
2220 name_in_addr_range = false;
2221
2222 size_t end_ix = (ix+granules_per_line <= alloc_granules) ? ix+granules_per_line : alloc_granules;
2223 ast->cr();
2224 ast->print_cr("--------------------------------------------------------------------");
2225 ast->print_cr("Address range [" INTPTR_FORMAT "," INTPTR_FORMAT "), " SIZE_FORMAT "k", p2i(low_bound+ix*granule_size), p2i(low_bound + end_ix*granule_size), (end_ix - ix)*granule_size/(size_t)K);
2226 ast->print_cr("--------------------------------------------------------------------");
2227 BUFFEREDSTREAM_FLUSH_AUTO("")
2228 }
2229 // Only check granule if it contains at least one blob.
2230 unsigned int nBlobs = StatArray[ix].t1_count + StatArray[ix].t2_count + StatArray[ix].tx_count +
2231 StatArray[ix].stub_count + StatArray[ix].dead_count;
2232 if (nBlobs > 0 ) {
2233 for (unsigned int is = 0; is < granule_size; is+=(unsigned int)seg_size) {
2234 // heap->find_start() is safe. Only works on _segmap.
2235 // Returns NULL or void*. Returned CodeBlob may be uninitialized.
2236 char* this_seg = low_bound + ix*granule_size + is;
2237 CodeBlob* this_blob = (CodeBlob*)(heap->find_start(this_seg));
2238 bool blob_is_safe = blob_access_is_safe(this_blob, NULL);
2239 // blob could have been flushed, freed, and merged.
2240 // this_blob < last_blob is an indicator for that.
2241 if (blob_is_safe && (this_blob > last_blob)) {
2242 last_blob = this_blob;
2243
2244 //---< get type and name >---
2245 blobType cbType = noType;
2246 if (segment_granules) {
2247 cbType = (blobType)StatArray[ix].type;
2248 } else {
2249 //---< access these fields only if we own the CodeCache_lock >---
2250 if (have_CodeCache_lock) {
2251 cbType = get_cbType(this_blob);
2252 }
2253 }
2254
2255 //---< access these fields only if we own the CodeCache_lock >---
2256 const char* blob_name = "<unavailable>";
2257 nmethod* nm = NULL;
2258 if (have_CodeCache_lock) {
2259 blob_name = this_blob->name();
2260 nm = this_blob->as_nmethod_or_null();
2261 // this_blob->name() could return NULL if no name was given to CTOR. Inlined, maybe invisible on stack
2262 if ((blob_name == NULL) || !os::is_readable_pointer(blob_name)) {
2263 blob_name = "<unavailable>";
2264 }
2265 }
2266
2267 //---< print table header for new print range >---
2268 if (!name_in_addr_range) {
2269 name_in_addr_range = true;
2270 ast->fill_to(51);
2271 ast->print("%9s", "compiler");
2272 ast->fill_to(61);
2273 ast->print_cr("%6s", "method");
2274 ast->print_cr("%18s %13s %17s %9s %5s %18s %s", "Addr(module) ", "offset", "size", " type lvl", " temp", "blobType ", "Name");
2275 BUFFEREDSTREAM_FLUSH_AUTO("")
2276 }
2277
2278 //---< print line prefix (address and offset from CodeHeap start) >---
2279 ast->print(INTPTR_FORMAT, p2i(this_blob));
2280 ast->fill_to(19);
2281 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
2282 ast->fill_to(33);
2283
2284 // access nmethod and Method fields only if we own the CodeCache_lock.
2285 // This fact is implicitly transported via nm != NULL.
2286 if (CompiledMethod::nmethod_access_is_safe(nm)) {
2287 Method* method = nm->method();
2288 ResourceMark rm;
2289 //---< collect all data to locals as quickly as possible >---
2290 unsigned int total_size = nm->total_size();
2291 int hotness = nm->hotness_counter();
2292 bool get_name = (cbType == nMethod_inuse) || (cbType == nMethod_notused);
2293 //---< nMethod size in hex >---
2294 ast->print(PTR32_FORMAT, total_size);
2295 ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
2296 //---< compiler information >---
2297 ast->fill_to(51);
2298 ast->print("%5s %3d", compTypeName[StatArray[ix].compiler], StatArray[ix].level);
2299 //---< method temperature >---
2300 ast->fill_to(62);
2301 ast->print("%5d", hotness);
2302 //---< name and signature >---
2303 ast->fill_to(62+6);
2304 ast->print("%s", blobTypeName[cbType]);
2305 ast->fill_to(82+6);
2306 if (cbType == nMethod_dead) {
2472 ast->print("|");
2473 }
2474 ast->cr();
2475
2476 // can't use BUFFEREDSTREAM_FLUSH_IF("", 512) here.
2477 // can't use this expression. bufferedStream::capacity() does not exist.
2478 // if ((ast->capacity() - ast->size()) < 512) {
2479 // Assume instead that default bufferedStream capacity (4K) was used.
2480 if (ast->size() > 3*K) {
2481 ttyLocker ttyl;
2482 out->print("%s", ast->as_string());
2483 ast->reset();
2484 }
2485
2486 ast->print(INTPTR_FORMAT, p2i(low_bound + ix*granule_size));
2487 ast->fill_to(19);
2488 ast->print("(+" PTR32_FORMAT "): |", (unsigned int)(ix*granule_size));
2489 }
2490 }
2491
2492 CodeHeapState::blobType CodeHeapState::get_cbType(CodeBlob* cb) {
2493 if ((cb != NULL) && os::is_readable_pointer(cb)) {
2494 if (cb->is_runtime_stub()) return runtimeStub;
2495 if (cb->is_deoptimization_stub()) return deoptimizationStub;
2496 if (cb->is_uncommon_trap_stub()) return uncommonTrapStub;
2497 if (cb->is_exception_stub()) return exceptionStub;
2498 if (cb->is_safepoint_stub()) return safepointStub;
2499 if (cb->is_adapter_blob()) return adapterBlob;
2500 if (cb->is_method_handles_adapter_blob()) return mh_adapterBlob;
2501 if (cb->is_buffer_blob()) return bufferBlob;
2502
2503 //---< access these fields only if we own the CodeCache_lock >---
2504 // Should be ensured by caller. aggregate() amd print_names() do that.
2505 if (CodeCache_lock->owned_by_self()) {
2506 nmethod* nm = cb->as_nmethod_or_null();
2507 if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb.
2508 if (nm->is_not_installed()) return nMethod_inconstruction;
2509 if (nm->is_zombie()) return nMethod_dead;
2510 if (nm->is_unloaded()) return nMethod_unloaded;
2511 if (nm->is_in_use()) return nMethod_inuse;
2512 if (nm->is_alive() && !(nm->is_not_entrant())) return nMethod_notused;
2513 if (nm->is_alive()) return nMethod_alive;
2514 return nMethod_dead;
2515 }
2516 }
2517 }
2518 return noType;
2519 }
2520
2521 bool CodeHeapState::blob_access_is_safe(CodeBlob* this_blob, CodeBlob* prev_blob) {
2522 return (this_blob != NULL) && // a blob must have been found, obviously
2523 ((this_blob == prev_blob) || (prev_blob == NULL)) && // when re-checking, the same blob must have been found
2524 (this_blob->header_size() >= 0) &&
2525 (this_blob->relocation_size() >= 0) &&
2526 ((address)this_blob + this_blob->header_size() == (address)(this_blob->relocation_begin())) &&
2527 ((address)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (address)(this_blob->content_begin())) &&
2528 os::is_readable_pointer((address)(this_blob->relocation_begin())) &&
2529 os::is_readable_pointer(this_blob->content_begin());
2530 }
|
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "code/codeHeapState.hpp"
28 #include "compiler/compileBroker.hpp"
29 #include "runtime/safepoint.hpp"
30 #include "runtime/sweeper.hpp"
31 #include "utilities/powerOfTwo.hpp"
32
33 // -------------------------
34 // | General Description |
35 // -------------------------
36 // The CodeHeap state analytics are divided in two parts.
37 // The first part examines the entire CodeHeap and aggregates all
38 // information that is believed useful/important.
39 //
40 // Aggregation condenses the information of a piece of the CodeHeap
41 // (4096 bytes by default) into an analysis granule. These granules
42 // contain enough detail to gain initial insight while keeping the
43 // internal structure sizes in check.
44 //
45 // The second part, which consists of several, independent steps,
46 // prints the previously collected information with emphasis on
47 // various aspects.
48 //
49 // The CodeHeap is a living thing. Therefore, protection against concurrent
194 #define BUFFEREDSTREAM_FLUSH(_termString) \
195 if (((_termString) != NULL) && (strlen(_termString) > 0)){\
196 _outbuf->print("%s", _termString); \
197 }
198
199 #define BUFFEREDSTREAM_FLUSH_IF(_termString, _remSize) \
200 BUFFEREDSTREAM_FLUSH(_termString)
201
202 #define BUFFEREDSTREAM_FLUSH_AUTO(_termString) \
203 BUFFEREDSTREAM_FLUSH(_termString)
204
205 #define BUFFEREDSTREAM_FLUSH_LOCKED(_termString) \
206 BUFFEREDSTREAM_FLUSH(_termString)
207
208 #define BUFFEREDSTREAM_FLUSH_STAT()
209 #endif
210 #define HEX32_FORMAT "0x%x" // just a helper format string used below multiple times
211
//---< per-blobType print tag and long name. Both arrays are indexed by blobType and must be kept in sync. >---
const char blobTypeChar[] = {' ', 'C', 'N', 'I', 'X', 'Z', 'U', 'R', '?', 'D', 'T', 'E', 'S', 'A', 'M', 'B', 'L' };
const char* blobTypeName[] = {"noType"
                             , "nMethod (under construction), cannot be observed"
                             , "nMethod (active)"
                             , "nMethod (inactive)"
                             , "nMethod (deopt)"
                             , "nMethod (zombie)"
                             , "nMethod (unloaded)"
                             , "runtime stub"
                             , "ricochet stub"
                             , "deopt stub"
                             , "uncommon trap stub"
                             , "exception stub"
                             , "safepoint stub"
                             , "adapter blob"
                             , "MH adapter blob"
                             , "buffer blob"
                             , "lastType"
                             };
//---< compiler type names, indexed by compType >---
const char* compTypeName[] = { "none", "c1", "c2", "jvmci" };

//---< sizing limits for the internal statistics structures >---
const unsigned int nSizeDistElements = 31;  // logarithmic range growth, max size: 2**32
const unsigned int maxTopSizeBlocks  = 100; // capacity of the "largest used blocks" list
const unsigned int tsbStopper        = 2 * maxTopSizeBlocks; // out-of-range index value, terminates the TopSizeArray chain
// Be prepared for ten different CodeHeap segments. Should be enough for a few years.
const unsigned int maxHeaps = 10;
static unsigned int nHeaps = 0; // number of heaps registered in CodeHeapStatArray so far
static struct CodeHeapStat CodeHeapStatArray[maxHeaps]; // per-heap snapshot of the working variables below

//---< working copies of the per-heap statistics. Swapped in/out via get/set_HeapStatGlobals(). >---
static StatElement* StatArray = NULL;     // one element per analysis granule
static int log2_seg_size = 0;             // log2 of seg_size; derived, not stored in CodeHeapStat
static size_t seg_size = 0;               // CodeHeap segment size (smallest allocation unit)
static size_t alloc_granules = 0;         // number of elements allocated for StatArray
static size_t granule_size = 0;           // number of bytes covered by one granule
static bool segment_granules = false;     // true if granule size equals segment size
static unsigned int nBlocks_t1 = 0; // counting "in_use" nmethods only.
static unsigned int nBlocks_t2 = 0; // counting "in_use" nmethods only.
static unsigned int nBlocks_alive = 0; // counting "not_used" and "not_entrant" nmethods only.
static unsigned int nBlocks_dead = 0; // counting "zombie" and "unloaded" methods only.
static unsigned int nBlocks_unloaded = 0; // counting "unloaded" nmethods only. This is a transient state.
static unsigned int nBlocks_stub = 0;

static struct FreeBlk* FreeArray = NULL;  // free blocks, ordered by ascending address (gap fields rely on that)
static unsigned int alloc_freeBlocks = 0;

static struct TopSizeBlk* TopSizeArray = NULL; // largest used blocks, chained in size order via the .index field
static unsigned int alloc_topSizeBlocks = 0;   // allocated capacity of TopSizeArray
static unsigned int used_topSizeBlocks = 0;    // number of entries currently in use

static struct SizeDistributionElement* SizeDistributionArray = NULL;

// nMethod temperature (hotness) indicators.
static int avgTemp = 0;
static int maxTemp = 0;
static int minTemp = 0;

static unsigned int latest_compilation_id = 0; // compilation id sampled at aggregation time
static volatile bool initialization_complete = false;
271
302 } else {
303 nHeaps = 1;
304 CodeHeapStatArray[0].heapName = heapName;
305 return 0; // This is the default index if CodeCache is not segmented.
306 }
307 }
308
309 void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName) {
310 unsigned int ix = findHeapIndex(out, heapName);
311 if (ix < maxHeaps) {
312 StatArray = CodeHeapStatArray[ix].StatArray;
313 seg_size = CodeHeapStatArray[ix].segment_size;
314 log2_seg_size = seg_size == 0 ? 0 : exact_log2(seg_size);
315 alloc_granules = CodeHeapStatArray[ix].alloc_granules;
316 granule_size = CodeHeapStatArray[ix].granule_size;
317 segment_granules = CodeHeapStatArray[ix].segment_granules;
318 nBlocks_t1 = CodeHeapStatArray[ix].nBlocks_t1;
319 nBlocks_t2 = CodeHeapStatArray[ix].nBlocks_t2;
320 nBlocks_alive = CodeHeapStatArray[ix].nBlocks_alive;
321 nBlocks_dead = CodeHeapStatArray[ix].nBlocks_dead;
322 nBlocks_unloaded = CodeHeapStatArray[ix].nBlocks_unloaded;
323 nBlocks_stub = CodeHeapStatArray[ix].nBlocks_stub;
324 FreeArray = CodeHeapStatArray[ix].FreeArray;
325 alloc_freeBlocks = CodeHeapStatArray[ix].alloc_freeBlocks;
326 TopSizeArray = CodeHeapStatArray[ix].TopSizeArray;
327 alloc_topSizeBlocks = CodeHeapStatArray[ix].alloc_topSizeBlocks;
328 used_topSizeBlocks = CodeHeapStatArray[ix].used_topSizeBlocks;
329 SizeDistributionArray = CodeHeapStatArray[ix].SizeDistributionArray;
330 avgTemp = CodeHeapStatArray[ix].avgTemp;
331 maxTemp = CodeHeapStatArray[ix].maxTemp;
332 minTemp = CodeHeapStatArray[ix].minTemp;
333 } else {
334 StatArray = NULL;
335 seg_size = 0;
336 log2_seg_size = 0;
337 alloc_granules = 0;
338 granule_size = 0;
339 segment_granules = false;
340 nBlocks_t1 = 0;
341 nBlocks_t2 = 0;
342 nBlocks_alive = 0;
343 nBlocks_dead = 0;
344 nBlocks_unloaded = 0;
345 nBlocks_stub = 0;
346 FreeArray = NULL;
347 alloc_freeBlocks = 0;
348 TopSizeArray = NULL;
349 alloc_topSizeBlocks = 0;
350 used_topSizeBlocks = 0;
351 SizeDistributionArray = NULL;
352 avgTemp = 0;
353 maxTemp = 0;
354 minTemp = 0;
355 }
356 }
357
358 void CodeHeapState::set_HeapStatGlobals(outputStream* out, const char* heapName) {
359 unsigned int ix = findHeapIndex(out, heapName);
360 if (ix < maxHeaps) {
361 CodeHeapStatArray[ix].StatArray = StatArray;
362 CodeHeapStatArray[ix].segment_size = seg_size;
363 CodeHeapStatArray[ix].alloc_granules = alloc_granules;
364 CodeHeapStatArray[ix].granule_size = granule_size;
365 CodeHeapStatArray[ix].segment_granules = segment_granules;
366 CodeHeapStatArray[ix].nBlocks_t1 = nBlocks_t1;
367 CodeHeapStatArray[ix].nBlocks_t2 = nBlocks_t2;
368 CodeHeapStatArray[ix].nBlocks_alive = nBlocks_alive;
369 CodeHeapStatArray[ix].nBlocks_dead = nBlocks_dead;
370 CodeHeapStatArray[ix].nBlocks_unloaded = nBlocks_unloaded;
371 CodeHeapStatArray[ix].nBlocks_stub = nBlocks_stub;
372 CodeHeapStatArray[ix].FreeArray = FreeArray;
373 CodeHeapStatArray[ix].alloc_freeBlocks = alloc_freeBlocks;
374 CodeHeapStatArray[ix].TopSizeArray = TopSizeArray;
375 CodeHeapStatArray[ix].alloc_topSizeBlocks = alloc_topSizeBlocks;
376 CodeHeapStatArray[ix].used_topSizeBlocks = used_topSizeBlocks;
377 CodeHeapStatArray[ix].SizeDistributionArray = SizeDistributionArray;
378 CodeHeapStatArray[ix].avgTemp = avgTemp;
379 CodeHeapStatArray[ix].maxTemp = maxTemp;
380 CodeHeapStatArray[ix].minTemp = minTemp;
381 }
382 }
383
384 //---< get a new statistics array >---
385 void CodeHeapState::prepare_StatArray(outputStream* out, size_t nElem, size_t granularity, const char* heapName) {
386 if (StatArray == NULL) {
387 StatArray = new StatElement[nElem];
388 //---< reset some counts >---
389 alloc_granules = nElem;
476
477 void CodeHeapState::discard_StatArray(outputStream* out) {
478 if (StatArray != NULL) {
479 delete StatArray;
480 StatArray = NULL;
481 alloc_granules = 0;
482 granule_size = 0;
483 }
484 }
485
486 void CodeHeapState::discard_FreeArray(outputStream* out) {
487 if (FreeArray != NULL) {
488 delete[] FreeArray;
489 FreeArray = NULL;
490 alloc_freeBlocks = 0;
491 }
492 }
493
494 void CodeHeapState::discard_TopSizeArray(outputStream* out) {
495 if (TopSizeArray != NULL) {
496 for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
497 if (TopSizeArray[i].blob_name != NULL) {
498 os::free((void*)TopSizeArray[i].blob_name);
499 }
500 }
501 delete[] TopSizeArray;
502 TopSizeArray = NULL;
503 alloc_topSizeBlocks = 0;
504 used_topSizeBlocks = 0;
505 }
506 }
507
508 void CodeHeapState::discard_SizeDistArray(outputStream* out) {
509 if (SizeDistributionArray != NULL) {
510 delete[] SizeDistributionArray;
511 SizeDistributionArray = NULL;
512 }
513 }
514
515 // Discard all allocated internal data structures.
516 // This should be done after an analysis session is completed.
517 void CodeHeapState::discard(outputStream* out, CodeHeap* heap) {
518 if (!initialization_complete) {
519 return;
520 }
574 BUFFEREDSTREAM_FLUSH("")
575 }
576 get_HeapStatGlobals(out, heapName);
577
578
579 // Since we are (and must be) analyzing the CodeHeap contents under the CodeCache_lock,
580 // all heap information is "constant" and can be safely extracted/calculated before we
581 // enter the while() loop. Actually, the loop will only be iterated once.
582 char* low_bound = heap->low_boundary();
583 size_t size = heap->capacity();
584 size_t res_size = heap->max_capacity();
585 seg_size = heap->segment_size();
586 log2_seg_size = seg_size == 0 ? 0 : exact_log2(seg_size); // This is a global static value.
587
588 if (seg_size == 0) {
589 printBox(ast, '-', "Heap not fully initialized yet, segment size is zero for segment ", heapName);
590 BUFFEREDSTREAM_FLUSH("")
591 return;
592 }
593
594 if (!holding_required_locks()) {
595 printBox(ast, '-', "Must be at safepoint or hold Compile_lock and CodeCache_lock when calling aggregate function for ", heapName);
596 BUFFEREDSTREAM_FLUSH("")
597 return;
598 }
599
600 // Calculate granularity of analysis (and output).
601 // The CodeHeap is managed (allocated) in segments (units) of CodeCacheSegmentSize.
602 // The CodeHeap can become fairly large, in particular in productive real-life systems.
603 //
604 // It is often neither feasible nor desirable to aggregate the data with the highest possible
605 // level of detail, i.e. inspecting and printing each segment on its own.
606 //
607 // The granularity parameter allows to specify the level of detail available in the analysis.
608 // It must be a positive multiple of the segment size and should be selected such that enough
609 // detail is provided while, at the same time, the printed output does not explode.
610 //
611 // By manipulating the granularity value, we enforce that at least min_granules units
612 // of analysis are available. We also enforce an upper limit of max_granules units to
613 // keep the amount of allocated storage in check.
614 //
615 // Finally, we adjust the granularity such that each granule covers at most 64k-1 segments.
642 " Subsequent print functions create their output based on this snapshot.\n"
643 " The CodeHeap is a living thing, and every effort has been made for the\n"
644 " collected data to be consistent. Only the method names and signatures\n"
645 " are retrieved at print time. That may lead to rare cases where the\n"
646 " name of a method is no longer available, e.g. because it was unloaded.\n");
647 ast->print_cr(" CodeHeap committed size " SIZE_FORMAT "K (" SIZE_FORMAT "M), reserved size " SIZE_FORMAT "K (" SIZE_FORMAT "M), %d%% occupied.",
648 size/(size_t)K, size/(size_t)M, res_size/(size_t)K, res_size/(size_t)M, (unsigned int)(100.0*size/res_size));
649 ast->print_cr(" CodeHeap allocation segment size is " SIZE_FORMAT " bytes. This is the smallest possible granularity.", seg_size);
650 ast->print_cr(" CodeHeap (committed part) is mapped to " SIZE_FORMAT " granules of size " SIZE_FORMAT " bytes.", granules, granularity);
651 ast->print_cr(" Each granule takes " SIZE_FORMAT " bytes of C heap, that is " SIZE_FORMAT "K in total for statistics data.", sizeof(StatElement), (sizeof(StatElement)*granules)/(size_t)K);
652 ast->print_cr(" The number of granules is limited to %dk, requiring a granules size of at least %d bytes for a 1GB heap.", (unsigned int)(max_granules/K), (unsigned int)(G/max_granules));
653 BUFFEREDSTREAM_FLUSH("\n")
654
655
656 while (!done) {
657 //---< reset counters with every aggregation >---
658 nBlocks_t1 = 0;
659 nBlocks_t2 = 0;
660 nBlocks_alive = 0;
661 nBlocks_dead = 0;
662 nBlocks_unloaded = 0;
663 nBlocks_stub = 0;
664
665 nBlocks_free = 0;
666 nBlocks_used = 0;
667 nBlocks_zomb = 0;
668 nBlocks_disconn = 0;
669 nBlocks_notentr = 0;
670
671 //---< discard old arrays if size does not match >---
672 if (granules != alloc_granules) {
673 discard_StatArray(out);
674 discard_TopSizeArray(out);
675 }
676
677 //---< allocate arrays if they don't yet exist, initialize >---
678 prepare_StatArray(out, granules, granularity, heapName);
679 if (StatArray == NULL) {
680 set_HeapStatGlobals(out, heapName);
681 return;
682 }
683 prepare_TopSizeArray(out, maxTopSizeBlocks, heapName);
684 prepare_SizeDistArray(out, nSizeDistElements, heapName);
685
686 latest_compilation_id = CompileBroker::get_compilation_id();
687 unsigned int highest_compilation_id = 0;
688 size_t usedSpace = 0;
689 size_t t1Space = 0;
690 size_t t2Space = 0;
691 size_t aliveSpace = 0;
692 size_t disconnSpace = 0;
693 size_t notentrSpace = 0;
694 size_t deadSpace = 0;
695 size_t unloadedSpace = 0;
696 size_t stubSpace = 0;
697 size_t freeSpace = 0;
698 size_t maxFreeSize = 0;
699 HeapBlock* maxFreeBlock = NULL;
700 bool insane = false;
701
702 int64_t hotnessAccumulator = 0;
703 unsigned int n_methods = 0;
704 avgTemp = 0;
705 minTemp = (int)(res_size > M ? (res_size/M)*2 : 1);
706 maxTemp = -minTemp;
707
708 for (HeapBlock *h = heap->first_block(); h != NULL && !insane; h = heap->next_block(h)) {
709 unsigned int hb_len = (unsigned int)h->length(); // despite being size_t, length can never overflow an unsigned int.
710 size_t hb_bytelen = ((size_t)hb_len)<<log2_seg_size;
711 unsigned int ix_beg = (unsigned int)(((char*)h-low_bound)/granule_size);
712 unsigned int ix_end = (unsigned int)(((char*)h-low_bound+(hb_bytelen-1))/granule_size);
713 unsigned int compile_id = 0;
714 CompLevel comp_lvl = CompLevel_none;
736 if (ix_beg > ix_end) {
737 insane = true; ast->print_cr("Sanity check: end index (%d) lower than begin index (%d)", ix_end, ix_beg);
738 }
739 if (insane) {
740 BUFFEREDSTREAM_FLUSH("")
741 continue;
742 }
743
744 if (h->free()) {
745 nBlocks_free++;
746 freeSpace += hb_bytelen;
747 if (hb_bytelen > maxFreeSize) {
748 maxFreeSize = hb_bytelen;
749 maxFreeBlock = h;
750 }
751 } else {
752 update_SizeDistArray(out, hb_len);
753 nBlocks_used++;
754 usedSpace += hb_bytelen;
755 CodeBlob* cb = (CodeBlob*)heap->find_start(h);
756 cbType = get_cbType(cb); // Will check for cb == NULL and other safety things.
757 if (cbType != noType) {
758 const char* blob_name = os::strdup(cb->name());
759 unsigned int nm_size = 0;
760 int temperature = 0;
761 nmethod* nm = cb->as_nmethod_or_null();
762 if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb.
763 ResourceMark rm;
764 Method* method = nm->method();
765 if (nm->is_in_use()) {
766 blob_name = os::strdup(method->name_and_sig_as_C_string());
767 }
768 if (nm->is_not_entrant()) {
769 blob_name = os::strdup(method->name_and_sig_as_C_string());
770 }
771
772 nm_size = nm->total_size();
773 compile_id = nm->compile_id();
774 comp_lvl = (CompLevel)(nm->comp_level());
775 if (nm->is_compiled_by_c1()) {
776 cType = c1;
777 }
778 if (nm->is_compiled_by_c2()) {
779 cType = c2;
780 }
781 if (nm->is_compiled_by_jvmci()) {
782 cType = jvmci;
783 }
784 switch (cbType) {
785 case nMethod_inuse: { // only for executable methods!!!
786 // space for these cbs is accounted for later.
787 temperature = nm->hotness_counter();
788 hotnessAccumulator += temperature;
789 n_methods++;
790 maxTemp = (temperature > maxTemp) ? temperature : maxTemp;
791 minTemp = (temperature < minTemp) ? temperature : minTemp;
792 break;
793 }
794 case nMethod_notused:
795 nBlocks_alive++;
796 nBlocks_disconn++;
797 aliveSpace += hb_bytelen;
798 disconnSpace += hb_bytelen;
799 break;
800 case nMethod_notentrant: // equivalent to nMethod_alive
801 nBlocks_alive++;
802 nBlocks_notentr++;
803 aliveSpace += hb_bytelen;
804 notentrSpace += hb_bytelen;
805 break;
806 case nMethod_unloaded:
807 nBlocks_unloaded++;
808 unloadedSpace += hb_bytelen;
809 break;
810 case nMethod_dead:
811 nBlocks_dead++;
812 deadSpace += hb_bytelen;
813 break;
814 default:
815 break;
816 }
817 }
818
819 //------------------------------------------
820 //---< register block in TopSizeArray >---
821 //------------------------------------------
822 if (alloc_topSizeBlocks > 0) {
823 if (used_topSizeBlocks == 0) {
824 TopSizeArray[0].start = h;
825 TopSizeArray[0].blob_name = blob_name;
826 TopSizeArray[0].len = hb_len;
827 TopSizeArray[0].index = tsbStopper;
828 TopSizeArray[0].nm_size = nm_size;
829 TopSizeArray[0].temperature = temperature;
830 TopSizeArray[0].compiler = cType;
831 TopSizeArray[0].level = comp_lvl;
832 TopSizeArray[0].type = cbType;
833 currMax = hb_len;
834 currMin = hb_len;
835 currMin_ix = 0;
836 used_topSizeBlocks++;
837 blob_name = NULL; // indicate blob_name was consumed
838 // This check roughly cuts 5000 iterations (JVM98, mixed, dbg, termination stats):
839 } else if ((used_topSizeBlocks < alloc_topSizeBlocks) && (hb_len < currMin)) {
840 //---< all blocks in list are larger, but there is room left in array >---
841 TopSizeArray[currMin_ix].index = used_topSizeBlocks;
842 TopSizeArray[used_topSizeBlocks].start = h;
843 TopSizeArray[used_topSizeBlocks].blob_name = blob_name;
844 TopSizeArray[used_topSizeBlocks].len = hb_len;
845 TopSizeArray[used_topSizeBlocks].index = tsbStopper;
846 TopSizeArray[used_topSizeBlocks].nm_size = nm_size;
847 TopSizeArray[used_topSizeBlocks].temperature = temperature;
848 TopSizeArray[used_topSizeBlocks].compiler = cType;
849 TopSizeArray[used_topSizeBlocks].level = comp_lvl;
850 TopSizeArray[used_topSizeBlocks].type = cbType;
851 currMin = hb_len;
852 currMin_ix = used_topSizeBlocks;
853 used_topSizeBlocks++;
854 blob_name = NULL; // indicate blob_name was consumed
855 } else {
856 // This check cuts total_iterations by a factor of 6 (JVM98, mixed, dbg, termination stats):
857 // We don't need to search the list if we know beforehand that the current block size is
858 // smaller than the currently recorded minimum and there is no free entry left in the list.
859 if (!((used_topSizeBlocks == alloc_topSizeBlocks) && (hb_len <= currMin))) {
860 if (currMax < hb_len) {
861 currMax = hb_len;
862 }
863 unsigned int i;
864 unsigned int prev_i = tsbStopper;
865 unsigned int limit_i = 0;
866 for (i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
867 if (limit_i++ >= alloc_topSizeBlocks) {
868 insane = true; break; // emergency exit
869 }
870 if (i >= used_topSizeBlocks) {
871 insane = true; break; // emergency exit
872 }
873 total_iterations++;
874 if (TopSizeArray[i].len < hb_len) {
875 //---< We want to insert here, element <i> is smaller than the current one >---
876 if (used_topSizeBlocks < alloc_topSizeBlocks) { // still room for a new entry to insert
877 // old entry gets moved to the next free element of the array.
878 // That's necessary to keep the entry for the largest block at index 0.
879 // This move might cause the current minimum to be moved to another place
880 if (i == currMin_ix) {
881 assert(TopSizeArray[i].len == currMin, "sort error");
882 currMin_ix = used_topSizeBlocks;
883 }
884 memcpy((void*)&TopSizeArray[used_topSizeBlocks], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
885 TopSizeArray[i].start = h;
886 TopSizeArray[i].blob_name = blob_name;
887 TopSizeArray[i].len = hb_len;
888 TopSizeArray[i].index = used_topSizeBlocks;
889 TopSizeArray[i].nm_size = nm_size;
890 TopSizeArray[i].temperature = temperature;
891 TopSizeArray[i].compiler = cType;
892 TopSizeArray[i].level = comp_lvl;
893 TopSizeArray[i].type = cbType;
894 used_topSizeBlocks++;
895 blob_name = NULL; // indicate blob_name was consumed
896 } else { // no room for new entries, current block replaces entry for smallest block
897 //---< Find last entry (entry for smallest remembered block) >---
898 // We either want to insert right before the smallest entry, which is when <i>
899 // indexes the smallest entry. We then just overwrite the smallest entry.
900 // What's more likely:
901 // We want to insert somewhere in the list. The smallest entry (@<j>) then falls off the cliff.
902 // The element at the insert point <i> takes it's slot. The second-smallest entry now becomes smallest.
903 // Data of the current block is filled in at index <i>.
904 unsigned int j = i;
905 unsigned int prev_j = tsbStopper;
906 unsigned int limit_j = 0;
907 while (TopSizeArray[j].index != tsbStopper) {
908 if (limit_j++ >= alloc_topSizeBlocks) {
909 insane = true; break; // emergency exit
910 }
911 if (j >= used_topSizeBlocks) {
912 insane = true; break; // emergency exit
913 }
914 total_iterations++;
915 prev_j = j;
916 j = TopSizeArray[j].index;
917 }
918 if (!insane) {
919 if (TopSizeArray[j].blob_name != NULL) {
920 os::free((void*)TopSizeArray[j].blob_name);
921 }
922 if (prev_j == tsbStopper) {
923 //---< Above while loop did not iterate, we already are the min entry >---
924 //---< We have to just replace the smallest entry >---
925 currMin = hb_len;
926 currMin_ix = j;
927 TopSizeArray[j].start = h;
928 TopSizeArray[j].blob_name = blob_name;
929 TopSizeArray[j].len = hb_len;
930 TopSizeArray[j].index = tsbStopper; // already set!!
931 TopSizeArray[i].nm_size = nm_size;
932 TopSizeArray[i].temperature = temperature;
933 TopSizeArray[j].compiler = cType;
934 TopSizeArray[j].level = comp_lvl;
935 TopSizeArray[j].type = cbType;
936 } else {
937 //---< second-smallest entry is now smallest >---
938 TopSizeArray[prev_j].index = tsbStopper;
939 currMin = TopSizeArray[prev_j].len;
940 currMin_ix = prev_j;
941 //---< previously smallest entry gets overwritten >---
942 memcpy((void*)&TopSizeArray[j], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
943 TopSizeArray[i].start = h;
944 TopSizeArray[i].blob_name = blob_name;
945 TopSizeArray[i].len = hb_len;
946 TopSizeArray[i].index = j;
947 TopSizeArray[i].nm_size = nm_size;
948 TopSizeArray[i].temperature = temperature;
949 TopSizeArray[i].compiler = cType;
950 TopSizeArray[i].level = comp_lvl;
951 TopSizeArray[i].type = cbType;
952 }
953 blob_name = NULL; // indicate blob_name was consumed
954 } // insane
955 }
956 break;
957 }
958 prev_i = i;
959 }
960 if (insane) {
961 // Note: regular analysis could probably continue by resetting "insane" flag.
962 out->print_cr("Possible loop in TopSizeBlocks list detected. Analysis aborted.");
963 discard_TopSizeArray(out);
964 }
965 }
966 }
967 }
968 if (blob_name != NULL) {
969 os::free((void*)blob_name);
970 blob_name = NULL;
971 }
972 //----------------------------------------------
973 //---< END register block in TopSizeArray >---
974 //----------------------------------------------
975 } else {
976 nBlocks_zomb++;
977 }
978
979 if (ix_beg == ix_end) {
980 StatArray[ix_beg].type = cbType;
981 switch (cbType) {
982 case nMethod_inuse:
983 highest_compilation_id = (highest_compilation_id >= compile_id) ? highest_compilation_id : compile_id;
984 if (comp_lvl < CompLevel_full_optimization) {
985 nBlocks_t1++;
986 t1Space += hb_bytelen;
987 StatArray[ix_beg].t1_count++;
988 StatArray[ix_beg].t1_space += (unsigned short)hb_len;
989 StatArray[ix_beg].t1_age = StatArray[ix_beg].t1_age < compile_id ? compile_id : StatArray[ix_beg].t1_age;
990 } else {
991 nBlocks_t2++;
992 t2Space += hb_bytelen;
993 StatArray[ix_beg].t2_count++;
994 StatArray[ix_beg].t2_space += (unsigned short)hb_len;
995 StatArray[ix_beg].t2_age = StatArray[ix_beg].t2_age < compile_id ? compile_id : StatArray[ix_beg].t2_age;
996 }
997 StatArray[ix_beg].level = comp_lvl;
998 StatArray[ix_beg].compiler = cType;
999 break;
1000 case nMethod_alive:
1001 StatArray[ix_beg].tx_count++;
1002 StatArray[ix_beg].tx_space += (unsigned short)hb_len;
1003 StatArray[ix_beg].tx_age = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
1004 StatArray[ix_beg].level = comp_lvl;
1005 StatArray[ix_beg].compiler = cType;
1006 break;
1007 case nMethod_dead:
1008 case nMethod_unloaded:
1009 StatArray[ix_beg].dead_count++;
1010 StatArray[ix_beg].dead_space += (unsigned short)hb_len;
1011 break;
1012 default:
1013 // must be a stub, if it's not a dead or alive nMethod
1014 nBlocks_stub++;
1015 stubSpace += hb_bytelen;
1016 StatArray[ix_beg].stub_count++;
1017 StatArray[ix_beg].stub_space += (unsigned short)hb_len;
1018 break;
1019 }
1036
1037 StatArray[ix_end].t1_count++;
1038 StatArray[ix_end].t1_space += (unsigned short)end_space;
1039 StatArray[ix_end].t1_age = StatArray[ix_end].t1_age < compile_id ? compile_id : StatArray[ix_end].t1_age;
1040 } else {
1041 nBlocks_t2++;
1042 t2Space += hb_bytelen;
1043 StatArray[ix_beg].t2_count++;
1044 StatArray[ix_beg].t2_space += (unsigned short)beg_space;
1045 StatArray[ix_beg].t2_age = StatArray[ix_beg].t2_age < compile_id ? compile_id : StatArray[ix_beg].t2_age;
1046
1047 StatArray[ix_end].t2_count++;
1048 StatArray[ix_end].t2_space += (unsigned short)end_space;
1049 StatArray[ix_end].t2_age = StatArray[ix_end].t2_age < compile_id ? compile_id : StatArray[ix_end].t2_age;
1050 }
1051 StatArray[ix_beg].level = comp_lvl;
1052 StatArray[ix_beg].compiler = cType;
1053 StatArray[ix_end].level = comp_lvl;
1054 StatArray[ix_end].compiler = cType;
1055 break;
1056 case nMethod_alive:
1057 StatArray[ix_beg].tx_count++;
1058 StatArray[ix_beg].tx_space += (unsigned short)beg_space;
1059 StatArray[ix_beg].tx_age = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
1060
1061 StatArray[ix_end].tx_count++;
1062 StatArray[ix_end].tx_space += (unsigned short)end_space;
1063 StatArray[ix_end].tx_age = StatArray[ix_end].tx_age < compile_id ? compile_id : StatArray[ix_end].tx_age;
1064
1065 StatArray[ix_beg].level = comp_lvl;
1066 StatArray[ix_beg].compiler = cType;
1067 StatArray[ix_end].level = comp_lvl;
1068 StatArray[ix_end].compiler = cType;
1069 break;
1070 case nMethod_dead:
1071 case nMethod_unloaded:
1072 StatArray[ix_beg].dead_count++;
1073 StatArray[ix_beg].dead_space += (unsigned short)beg_space;
1074 StatArray[ix_end].dead_count++;
1075 StatArray[ix_end].dead_space += (unsigned short)end_space;
1083 StatArray[ix_end].stub_count++;
1084 StatArray[ix_end].stub_space += (unsigned short)end_space;
1085 break;
1086 }
1087 for (unsigned int ix = ix_beg+1; ix < ix_end; ix++) {
1088 StatArray[ix].type = cbType;
1089 switch (cbType) {
1090 case nMethod_inuse:
1091 if (comp_lvl < CompLevel_full_optimization) {
1092 StatArray[ix].t1_count++;
1093 StatArray[ix].t1_space += (unsigned short)(granule_size>>log2_seg_size);
1094 StatArray[ix].t1_age = StatArray[ix].t1_age < compile_id ? compile_id : StatArray[ix].t1_age;
1095 } else {
1096 StatArray[ix].t2_count++;
1097 StatArray[ix].t2_space += (unsigned short)(granule_size>>log2_seg_size);
1098 StatArray[ix].t2_age = StatArray[ix].t2_age < compile_id ? compile_id : StatArray[ix].t2_age;
1099 }
1100 StatArray[ix].level = comp_lvl;
1101 StatArray[ix].compiler = cType;
1102 break;
1103 case nMethod_alive:
1104 StatArray[ix].tx_count++;
1105 StatArray[ix].tx_space += (unsigned short)(granule_size>>log2_seg_size);
1106 StatArray[ix].tx_age = StatArray[ix].tx_age < compile_id ? compile_id : StatArray[ix].tx_age;
1107 StatArray[ix].level = comp_lvl;
1108 StatArray[ix].compiler = cType;
1109 break;
1110 case nMethod_dead:
1111 case nMethod_unloaded:
1112 StatArray[ix].dead_count++;
1113 StatArray[ix].dead_space += (unsigned short)(granule_size>>log2_seg_size);
1114 break;
1115 default:
1116 // must be a stub, if it's not a dead or alive nMethod
1117 StatArray[ix].stub_count++;
1118 StatArray[ix].stub_space += (unsigned short)(granule_size>>log2_seg_size);
1119 break;
1120 }
1121 }
1122 }
1123 }
1124 }
1125 done = true;
1126
1127 if (!insane) {
1128 // There is a risk for this block (because it contains many print statements) to get
1129 // interspersed with print data from other threads. We take this risk intentionally.
1130 // Getting stalled waiting for tty_lock while holding the CodeCache_lock is not desirable.
1131 printBox(ast, '-', "Global CodeHeap statistics for segment ", heapName);
1132 ast->print_cr("freeSpace = " SIZE_FORMAT_W(8) "k, nBlocks_free = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", freeSpace/(size_t)K, nBlocks_free, (100.0*freeSpace)/size, (100.0*freeSpace)/res_size);
1133 ast->print_cr("usedSpace = " SIZE_FORMAT_W(8) "k, nBlocks_used = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", usedSpace/(size_t)K, nBlocks_used, (100.0*usedSpace)/size, (100.0*usedSpace)/res_size);
1134 ast->print_cr(" Tier1 Space = " SIZE_FORMAT_W(8) "k, nBlocks_t1 = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", t1Space/(size_t)K, nBlocks_t1, (100.0*t1Space)/size, (100.0*t1Space)/res_size);
1135 ast->print_cr(" Tier2 Space = " SIZE_FORMAT_W(8) "k, nBlocks_t2 = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", t2Space/(size_t)K, nBlocks_t2, (100.0*t2Space)/size, (100.0*t2Space)/res_size);
1136 ast->print_cr(" Alive Space = " SIZE_FORMAT_W(8) "k, nBlocks_alive = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", aliveSpace/(size_t)K, nBlocks_alive, (100.0*aliveSpace)/size, (100.0*aliveSpace)/res_size);
1137 ast->print_cr(" disconnected = " SIZE_FORMAT_W(8) "k, nBlocks_disconn = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", disconnSpace/(size_t)K, nBlocks_disconn, (100.0*disconnSpace)/size, (100.0*disconnSpace)/res_size);
1138 ast->print_cr(" not entrant = " SIZE_FORMAT_W(8) "k, nBlocks_notentr = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", notentrSpace/(size_t)K, nBlocks_notentr, (100.0*notentrSpace)/size, (100.0*notentrSpace)/res_size);
1139 ast->print_cr(" unloadedSpace = " SIZE_FORMAT_W(8) "k, nBlocks_unloaded = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", unloadedSpace/(size_t)K, nBlocks_unloaded, (100.0*unloadedSpace)/size, (100.0*unloadedSpace)/res_size);
1140 ast->print_cr(" deadSpace = " SIZE_FORMAT_W(8) "k, nBlocks_dead = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", deadSpace/(size_t)K, nBlocks_dead, (100.0*deadSpace)/size, (100.0*deadSpace)/res_size);
1141 ast->print_cr(" stubSpace = " SIZE_FORMAT_W(8) "k, nBlocks_stub = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", stubSpace/(size_t)K, nBlocks_stub, (100.0*stubSpace)/size, (100.0*stubSpace)/res_size);
1142 ast->print_cr("ZombieBlocks = %8d. These are HeapBlocks which could not be identified as CodeBlobs.", nBlocks_zomb);
1143 ast->cr();
1144 ast->print_cr("Segment start = " INTPTR_FORMAT ", used space = " SIZE_FORMAT_W(8)"k", p2i(low_bound), size/K);
1145 ast->print_cr("Segment end (used) = " INTPTR_FORMAT ", remaining space = " SIZE_FORMAT_W(8)"k", p2i(low_bound) + size, (res_size - size)/K);
1146 ast->print_cr("Segment end (reserved) = " INTPTR_FORMAT ", reserved space = " SIZE_FORMAT_W(8)"k", p2i(low_bound) + res_size, res_size/K);
1147 ast->cr();
1148 ast->print_cr("latest allocated compilation id = %d", latest_compilation_id);
1149 ast->print_cr("highest observed compilation id = %d", highest_compilation_id);
1150 ast->print_cr("Building TopSizeList iterations = %ld", total_iterations);
1151 ast->cr();
1152
1153 int reset_val = NMethodSweeper::hotness_counter_reset_val();
1154 double reverse_free_ratio = (res_size > size) ? (double)res_size/(double)(res_size-size) : (double)res_size;
1155 printBox(ast, '-', "Method hotness information at time of this analysis", NULL);
1156 ast->print_cr("Highest possible method temperature: %12d", reset_val);
1157 ast->print_cr("Threshold for method to be considered 'cold': %12.3f", -reset_val + reverse_free_ratio * NmethodSweepActivity);
1158 if (n_methods > 0) {
1296 ast->print_cr("Free block count mismatch could not be resolved.");
1297 ast->print_cr("Try to run \"aggregate\" function to update counters");
1298 }
1299 BUFFEREDSTREAM_FLUSH("")
1300
1301 //---< discard old array and update global values >---
1302 discard_FreeArray(out);
1303 set_HeapStatGlobals(out, heapName);
1304 return;
1305 }
1306
1307 //---< calculate and fill remaining fields >---
1308 if (FreeArray != NULL) {
1309 // This loop is intentionally printing directly to "out".
1310 // It should not print anything, anyway.
1311 for (unsigned int ix = 0; ix < alloc_freeBlocks-1; ix++) {
1312 size_t lenSum = 0;
1313 FreeArray[ix].gap = (unsigned int)((address)FreeArray[ix+1].start - ((address)FreeArray[ix].start + FreeArray[ix].len));
1314 for (HeapBlock *h = heap->next_block(FreeArray[ix].start); (h != NULL) && (h != FreeArray[ix+1].start); h = heap->next_block(h)) {
1315 CodeBlob *cb = (CodeBlob*)(heap->find_start(h));
1316 if ((cb != NULL) && !cb->is_nmethod()) { // checks equivalent to those in get_cbType()
1317 FreeArray[ix].stubs_in_gap = true;
1318 }
1319 FreeArray[ix].n_gapBlocks++;
1320 lenSum += h->length()<<log2_seg_size;
1321 if (((address)h < ((address)FreeArray[ix].start+FreeArray[ix].len)) || (h >= FreeArray[ix+1].start)) {
1322 out->print_cr("unsorted occupied CodeHeap block found @ %p, gap interval [%p, %p)", h, (address)FreeArray[ix].start+FreeArray[ix].len, FreeArray[ix+1].start);
1323 }
1324 }
1325 if (lenSum != FreeArray[ix].gap) {
1326 out->print_cr("Length mismatch for gap between FreeBlk[%d] and FreeBlk[%d]. Calculated: %d, accumulated: %d.", ix, ix+1, FreeArray[ix].gap, (unsigned int)lenSum);
1327 }
1328 }
1329 }
1330 set_HeapStatGlobals(out, heapName);
1331
1332 printBox(ast, '=', "C O D E H E A P A N A L Y S I S C O M P L E T E for segment ", heapName);
1333 BUFFEREDSTREAM_FLUSH("\n")
1334 }
1335
1336
1348 BUFFEREDSTREAM_DECL(ast, out)
1349
1350 {
1351 printBox(ast, '=', "U S E D S P A C E S T A T I S T I C S for ", heapName);
1352 ast->print_cr("Note: The Top%d list of the largest used blocks associates method names\n"
1353 " and other identifying information with the block size data.\n"
1354 "\n"
1355 " Method names are dynamically retrieved from the code cache at print time.\n"
1356 " Due to the living nature of the code cache and because the CodeCache_lock\n"
1357 " is not continuously held, the displayed name might be wrong or no name\n"
1358 " might be found at all. The likelihood for that to happen increases\n"
1359 " over time passed between analysis and print step.\n", used_topSizeBlocks);
1360 BUFFEREDSTREAM_FLUSH_LOCKED("\n")
1361 }
1362
1363 //----------------------------
1364 //-- Print Top Used Blocks --
1365 //----------------------------
1366 {
1367 char* low_bound = heap->low_boundary();
1368
1369 printBox(ast, '-', "Largest Used Blocks in ", heapName);
1370 print_blobType_legend(ast);
1371
1372 ast->fill_to(51);
1373 ast->print("%4s", "blob");
1374 ast->fill_to(56);
1375 ast->print("%9s", "compiler");
1376 ast->fill_to(66);
1377 ast->print_cr("%6s", "method");
1378 ast->print_cr("%18s %13s %17s %4s %9s %5s %s", "Addr(module) ", "offset", "size", "type", " type lvl", " temp", "Name");
1379 BUFFEREDSTREAM_FLUSH_LOCKED("")
1380
1381 //---< print Top Ten Used Blocks >---
1382 if (used_topSizeBlocks > 0) {
1383 unsigned int printed_topSizeBlocks = 0;
1384 for (unsigned int i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
1385 printed_topSizeBlocks++;
1386 if (TopSizeArray[i].blob_name == NULL) {
1387 TopSizeArray[i].blob_name = os::strdup("unnamed blob or blob name unavailable");
1388 }
1389 // heap->find_start() is safe. Only works on _segmap.
1390 // Returns NULL or void*. Returned CodeBlob may be uninitialized.
1391 HeapBlock* heapBlock = TopSizeArray[i].start;
1392 CodeBlob* this_blob = (CodeBlob*)(heap->find_start(heapBlock));
1393 if (this_blob != NULL) {
1394 //---< access these fields only if we own the CodeCache_lock >---
1395 //---< blob address >---
1396 ast->print(INTPTR_FORMAT, p2i(this_blob));
1397 ast->fill_to(19);
1398 //---< blob offset from CodeHeap begin >---
1399 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
1400 ast->fill_to(33);
1401 } else {
1402 //---< block address >---
1403 ast->print(INTPTR_FORMAT, p2i(TopSizeArray[i].start));
1404 ast->fill_to(19);
1405 //---< block offset from CodeHeap begin >---
1406 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)TopSizeArray[i].start-low_bound));
1407 ast->fill_to(33);
1408 }
1409
1410 //---< print size, name, and signature (for nMethods) >---
1411 bool is_nmethod = TopSizeArray[i].nm_size > 0;
1412 if (is_nmethod) {
1413 //---< nMethod size in hex >---
1414 ast->print(PTR32_FORMAT, TopSizeArray[i].nm_size);
1415 ast->print("(" SIZE_FORMAT_W(4) "K)", TopSizeArray[i].nm_size/K);
1416 ast->fill_to(51);
1417 ast->print(" %c", blobTypeChar[TopSizeArray[i].type]);
1418 //---< compiler information >---
1419 ast->fill_to(56);
1420 ast->print("%5s %3d", compTypeName[TopSizeArray[i].compiler], TopSizeArray[i].level);
1421 //---< method temperature >---
1422 ast->fill_to(67);
1423 ast->print("%5d", TopSizeArray[i].temperature);
1424 //---< name and signature >---
1425 ast->fill_to(67+6);
1426 if (TopSizeArray[i].type == nMethod_dead) {
1427 ast->print(" zombie method ");
1428 }
1429 ast->print("%s", TopSizeArray[i].blob_name);
1430 } else {
1431 //---< block size in hex >---
1432 ast->print(PTR32_FORMAT, (unsigned int)(TopSizeArray[i].len<<log2_seg_size));
1433 ast->print("(" SIZE_FORMAT_W(4) "K)", (TopSizeArray[i].len<<log2_seg_size)/K);
1434 //---< no compiler information >---
1435 ast->fill_to(56);
1436 //---< name and signature >---
1437 ast->fill_to(67+6);
1438 ast->print("%s", TopSizeArray[i].blob_name);
1439 }
1440 ast->cr();
1441 BUFFEREDSTREAM_FLUSH_AUTO("")
1442 }
1443 if (used_topSizeBlocks != printed_topSizeBlocks) {
1444 ast->print_cr("used blocks: %d, printed blocks: %d", used_topSizeBlocks, printed_topSizeBlocks);
1445 for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
1446 ast->print_cr(" TopSizeArray[%d].index = %d, len = %d", i, TopSizeArray[i].index, TopSizeArray[i].len);
1447 BUFFEREDSTREAM_FLUSH_AUTO("")
1448 }
1449 }
1450 BUFFEREDSTREAM_FLUSH("\n\n")
1451 }
1452 }
1453
1454 //-----------------------------
1455 //-- Print Usage Histogram --
1456 //-----------------------------
1457
1458 if (SizeDistributionArray != NULL) {
2199 }
2200
2201
2202 void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
2203 if (!initialization_complete) {
2204 return;
2205 }
2206
2207 const char* heapName = get_heapName(heap);
2208 get_HeapStatGlobals(out, heapName);
2209
2210 if ((StatArray == NULL) || (alloc_granules == 0)) {
2211 return;
2212 }
2213 BUFFEREDSTREAM_DECL(ast, out)
2214
2215 unsigned int granules_per_line = 128;
2216 char* low_bound = heap->low_boundary();
2217 CodeBlob* last_blob = NULL;
2218 bool name_in_addr_range = true;
2219 bool have_locks = holding_required_locks();
2220
2221 //---< print at least 128K per block (i.e. between headers) >---
2222 if (granules_per_line*granule_size < 128*K) {
2223 granules_per_line = (unsigned int)((128*K)/granule_size);
2224 }
2225
2226 printBox(ast, '=', "M E T H O D N A M E S for ", heapName);
2227 ast->print_cr(" Method names are dynamically retrieved from the code cache at print time.\n"
2228 " Due to the living nature of the code heap and because the CodeCache_lock\n"
2229 " is not continuously held, the displayed name might be wrong or no name\n"
2230 " might be found at all. The likelihood for that to happen increases\n"
2231 " over time passed between aggregation and print steps.\n");
2232 BUFFEREDSTREAM_FLUSH_LOCKED("")
2233
2234 for (unsigned int ix = 0; ix < alloc_granules; ix++) {
2235 //---< print a new blob on a new line >---
2236 if (ix%granules_per_line == 0) {
2237 if (!name_in_addr_range) {
2238 ast->print_cr("No methods, blobs, or stubs found in this address range");
2239 }
2240 name_in_addr_range = false;
2241
2242 size_t end_ix = (ix+granules_per_line <= alloc_granules) ? ix+granules_per_line : alloc_granules;
2243 ast->cr();
2244 ast->print_cr("--------------------------------------------------------------------");
2245 ast->print_cr("Address range [" INTPTR_FORMAT "," INTPTR_FORMAT "), " SIZE_FORMAT "k", p2i(low_bound+ix*granule_size), p2i(low_bound + end_ix*granule_size), (end_ix - ix)*granule_size/(size_t)K);
2246 ast->print_cr("--------------------------------------------------------------------");
2247 BUFFEREDSTREAM_FLUSH_AUTO("")
2248 }
2249 // Only check granule if it contains at least one blob.
2250 unsigned int nBlobs = StatArray[ix].t1_count + StatArray[ix].t2_count + StatArray[ix].tx_count +
2251 StatArray[ix].stub_count + StatArray[ix].dead_count;
2252 if (nBlobs > 0 ) {
2253 for (unsigned int is = 0; is < granule_size; is+=(unsigned int)seg_size) {
2254 // heap->find_start() is safe. Only works on _segmap.
2255 // Returns NULL or void*. Returned CodeBlob may be uninitialized.
2256 char* this_seg = low_bound + ix*granule_size + is;
2257 CodeBlob* this_blob = (CodeBlob*)(heap->find_start(this_seg));
2258 bool blob_is_safe = blob_access_is_safe(this_blob);
2259 // blob could have been flushed, freed, and merged.
2260 // this_blob < last_blob is an indicator for that.
2261 if (blob_is_safe && (this_blob > last_blob)) {
2262 last_blob = this_blob;
2263
2264 //---< get type and name >---
2265 blobType cbType = noType;
2266 if (segment_granules) {
2267 cbType = (blobType)StatArray[ix].type;
2268 } else {
2269 //---< access these fields only if we own the CodeCache_lock >---
2270 if (have_locks) {
2271 cbType = get_cbType(this_blob);
2272 }
2273 }
2274
2275 //---< access these fields only if we own the CodeCache_lock >---
2276 const char* blob_name = "<unavailable>";
2277 nmethod* nm = NULL;
2278 if (have_locks) {
2279 blob_name = this_blob->name();
2280 nm = this_blob->as_nmethod_or_null();
2281 // this_blob->name() could return NULL if no name was given to CTOR. Inlined, maybe invisible on stack
2282 if (blob_name == NULL) {
2283 blob_name = "<unavailable>";
2284 }
2285 else { // This check should never fail because we are now holding all required locks.
2286 guarantee(os::is_readable_pointer(blob_name), "Oops!");
2287 }
2288 }
2289
2290 //---< print table header for new print range >---
2291 if (!name_in_addr_range) {
2292 name_in_addr_range = true;
2293 ast->fill_to(51);
2294 ast->print("%9s", "compiler");
2295 ast->fill_to(61);
2296 ast->print_cr("%6s", "method");
2297 ast->print_cr("%18s %13s %17s %9s %5s %18s %s", "Addr(module) ", "offset", "size", " type lvl", " temp", "blobType ", "Name");
2298 BUFFEREDSTREAM_FLUSH_AUTO("")
2299 }
2300
2301 //---< print line prefix (address and offset from CodeHeap start) >---
2302 ast->print(INTPTR_FORMAT, p2i(this_blob));
2303 ast->fill_to(19);
2304 ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
2305 ast->fill_to(33);
2306
2307 // access nmethod and Method fields only if we own the CodeCache_lock.
2308 // This fact is implicitly transported via nm != NULL.
2309 if (nmethod_access_is_safe(nm)) {
2310 Method* method = nm->method();
2311 ResourceMark rm;
2312 //---< collect all data to locals as quickly as possible >---
2313 unsigned int total_size = nm->total_size();
2314 int hotness = nm->hotness_counter();
2315 bool get_name = (cbType == nMethod_inuse) || (cbType == nMethod_notused);
2316 //---< nMethod size in hex >---
2317 ast->print(PTR32_FORMAT, total_size);
2318 ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
2319 //---< compiler information >---
2320 ast->fill_to(51);
2321 ast->print("%5s %3d", compTypeName[StatArray[ix].compiler], StatArray[ix].level);
2322 //---< method temperature >---
2323 ast->fill_to(62);
2324 ast->print("%5d", hotness);
2325 //---< name and signature >---
2326 ast->fill_to(62+6);
2327 ast->print("%s", blobTypeName[cbType]);
2328 ast->fill_to(82+6);
2329 if (cbType == nMethod_dead) {
2495 ast->print("|");
2496 }
2497 ast->cr();
2498
2499 // can't use BUFFEREDSTREAM_FLUSH_IF("", 512) here.
2500 // can't use this expression. bufferedStream::capacity() does not exist.
2501 // if ((ast->capacity() - ast->size()) < 512) {
2502 // Assume instead that default bufferedStream capacity (4K) was used.
2503 if (ast->size() > 3*K) {
2504 ttyLocker ttyl;
2505 out->print("%s", ast->as_string());
2506 ast->reset();
2507 }
2508
2509 ast->print(INTPTR_FORMAT, p2i(low_bound + ix*granule_size));
2510 ast->fill_to(19);
2511 ast->print("(+" PTR32_FORMAT "): |", (unsigned int)(ix*granule_size));
2512 }
2513 }
2514
2515 // Find out which blob type we have at hand.
2516 // Return "noType" if anything abnormal is detected.
2517 CodeHeapState::blobType CodeHeapState::get_cbType(CodeBlob* cb) {
2518 if (cb != NULL) {
2519 if (cb->is_runtime_stub()) return runtimeStub;
2520 if (cb->is_deoptimization_stub()) return deoptimizationStub;
2521 if (cb->is_uncommon_trap_stub()) return uncommonTrapStub;
2522 if (cb->is_exception_stub()) return exceptionStub;
2523 if (cb->is_safepoint_stub()) return safepointStub;
2524 if (cb->is_adapter_blob()) return adapterBlob;
2525 if (cb->is_method_handles_adapter_blob()) return mh_adapterBlob;
2526 if (cb->is_buffer_blob()) return bufferBlob;
2527
2528 //---< access these fields only if we own CodeCache_lock and Compile_lock >---
2529 // Should be ensured by caller. aggregate() and print_names() do that.
2530 if (holding_required_locks()) {
2531 nmethod* nm = cb->as_nmethod_or_null();
2532 if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb.
2533 if (nm->is_zombie()) return nMethod_dead;
2534 if (nm->is_unloaded()) return nMethod_unloaded;
2535 if (nm->is_in_use()) return nMethod_inuse;
2536 if (nm->is_alive() && !(nm->is_not_entrant())) return nMethod_notused;
2537 if (nm->is_alive()) return nMethod_alive;
2538 return nMethod_dead;
2539 }
2540 }
2541 }
2542 return noType;
2543 }
2544
2545 // make sure the blob at hand is not garbage.
2546 bool CodeHeapState::blob_access_is_safe(CodeBlob* this_blob) {
2547 if (this_blob != NULL) { // These checks should never fail because we are now holding all required locks.
2548 guarantee(os::is_readable_pointer(this_blob->content_begin()), "Oops!");
2549 guarantee(os::is_readable_pointer((address)(this_blob->relocation_begin())), "Oops!");
2550 }
2551 return (this_blob != NULL) && // a blob must have been found, obviously
2552 (this_blob->header_size() >= 0) &&
2553 (this_blob->relocation_size() >= 0) &&
2554 ((address)this_blob + this_blob->header_size() == (address)(this_blob->relocation_begin())) &&
2555 ((address)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (address)(this_blob->content_begin()));
2556 }
2557
2558 // make sure the nmethod at hand (and the linked method) is not garbage.
2559 bool CodeHeapState::nmethod_access_is_safe(nmethod* nm) {
2560 Method* method = (nm == NULL) ? NULL : nm->method(); // nm->method() was found to be uninitialized, i.e. != NULL, but invalid.
2561 // This check should never fail because we are now holding all required locks.
2562 guarantee((method == NULL) || os::is_readable_pointer(method), "Oops! nmethod: " INTPTR_FORMAT ", method: " INTPTR_FORMAT, p2i(nm), p2i(method)); // Should not occur anymore because we are now holding all required locks.
2563
2564 return (nm != NULL) && (method != NULL) && nm->is_alive() && (method->signature() != NULL);
2565 }
2566
2567 bool CodeHeapState::holding_required_locks() {
2568 return SafepointSynchronize::is_at_safepoint() ||
2569 (CodeCache_lock->owned_by_self() && Compile_lock->owned_by_self());
2570 }
|