
src/hotspot/share/code/codeHeapState.cpp

rev 54112 : 8216314: SIGILL in CodeHeapState::print_names()
Reviewed-by: thartmann, kvn

@@ -1,8 +1,8 @@
 /*
  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -38,19 +38,21 @@
 // Aggregation condenses the information of a piece of the CodeHeap
 // (4096 bytes by default) into an analysis granule. These granules
 // contain enough detail to gain initial insight while keeping the
 // internal structure sizes in check.
 //
-// The CodeHeap is a living thing. Therefore, the aggregate is collected
-// under the CodeCache_lock. The subsequent print steps are only locked
-// against concurrent aggregations. That keeps the impact on
-// "normal operation" (JIT compiler and sweeper activity) to a minimum.
-//
 // The second part, which consists of several, independent steps,
 // prints the previously collected information with emphasis on
 // various aspects.
 //
+// The CodeHeap is a living thing. Therefore, protection against concurrent
+// modification (by acquiring the CodeCache_lock) is necessary. That
+// protection must be provided by the caller of the analysis functions.
+// If the CodeCache_lock is not held, the analysis functions may print
+// less detailed information or may just do nothing. It is intentional
+// that an unprotected invocation does not terminate abnormally.
+//
 // Data collection and printing is done on an "on request" basis.
 // While no request is being processed, there is no impact on performance.
 // The CodeHeap state analytics do have some memory footprint.
 // The "aggregate" step allocates some data structures to hold the aggregated
 // information for later output. These data structures live until they are

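The comment block above states the locking contract: the aggregation step needs the CodeCache_lock, and an unlocked call degrades gracefully instead of asserting. Below is a minimal, self-contained sketch of that contract; Lock, CodeCache_lock_model and aggregate_model are illustrative stand-ins, not HotSpot types, and only the control flow mirrors the patch.

#include <cstdio>
#include <thread>

class Lock {
  std::thread::id _owner{};                  // id of the owning thread, if any
public:
  void lock()   { _owner = std::this_thread::get_id(); }
  void unlock() { _owner = std::thread::id(); }
  bool owned_by_self() const { return _owner == std::this_thread::get_id(); }
};

Lock CodeCache_lock_model;                   // stand-in for CodeCache_lock

void aggregate_model(const char* heapName) {
  // Mirrors the new guard in aggregate(): refuse to collect data when the
  // caller did not acquire the lock, but return normally instead of asserting.
  if (!CodeCache_lock_model.owned_by_self()) {
    printf("aggregate called without holding the lock for %s\n", heapName);
    return;
  }
  printf("collecting CodeHeap statistics for %s ...\n", heapName);
}

int main() {
  aggregate_model("CodeHeap 'non-profiled nmethods'");  // degrades: notice only
  CodeCache_lock_model.lock();
  aggregate_model("CodeHeap 'non-profiled nmethods'");  // full collection path
  CodeCache_lock_model.unlock();
  return 0;
}
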
@@ -457,11 +459,11 @@
   unsigned long total_iterations = 0;
 
   bool  done             = false;
   const int min_granules = 256;
   const int max_granules = 512*K; // limits analyzable CodeHeap (with segment_granules) to 32M..128M
-                                  // results in StatArray size of 20M (= max_granules * 40 Bytes per element)
+                                  // results in StatArray size of 24M (= max_granules * 48 Bytes per element)
                                  // For a 1GB CodeHeap, the granule size must be at least 2kB to not violate the max_granules limit.
   const char* heapName   = get_heapName(heap);
   STRINGSTREAM_DECL(ast, out)
 
   if (!initialization_complete) {

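The numbers in the updated comment can be checked directly: 512K granules at 48 bytes per StatArray element give 24M, and a 1GB CodeHeap split into at most 512K granules needs granules of at least 2kB. A small stand-alone check (the 48-byte element size is taken from the comment above, not measured):

#include <cstdio>
#include <cstddef>

int main() {
  const size_t K = 1024, M = K * K, G = K * M;
  const size_t max_granules   = 512 * K;
  const size_t bytes_per_elem = 48;   // per the updated comment above
  printf("StatArray size: %zuM\n", (max_granules * bytes_per_elem) / M);   // 24M
  printf("min granule for a 1GB heap: %zuK\n", (G / max_granules) / K);    // 2K
  return 0;
}
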
@@ -496,10 +498,16 @@
     printBox(ast, '-', "Heap not fully initialized yet, segment size is zero for segment ", heapName);
     STRINGSTREAM_FLUSH("")
     return;
   }
 
+  if (!CodeCache_lock->owned_by_self()) {
+    printBox(ast, '-', "aggregate function called without holding the CodeCache_lock for ", heapName);
+    STRINGSTREAM_FLUSH("")
+    return;
+  }
+
   // Calculate granularity of analysis (and output).
   //   The CodeHeap is managed (allocated) in segments (units) of CodeCacheSegmentSize.
   //   The CodeHeap can become fairly large, in particular in productive real-life systems.
   //
   //   It is often neither feasible nor desirable to aggregate the data with the highest possible

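On the caller side, this contract means the aggregation step runs under a scoped lock while the print steps may run without it. A sketch of that call pattern using standard-library stand-ins (std::mutex instead of CodeCache_lock; aggregate_step and print_step are placeholders, not HotSpot functions):

#include <mutex>
#include <cstdio>

std::mutex codecache_mutex;      // stand-in for CodeCache_lock

void aggregate_step() { printf("aggregate: collects full detail (lock held)\n"); }
void print_step()     { printf("print: uses the previously aggregated data\n"); }

int main() {
  {
    std::lock_guard<std::mutex> guard(codecache_mutex);  // released at the '}'
    aggregate_step();             // must run under the lock (see guard above)
  }
  print_step();   // may run unlocked; then prints less detail for live blobs
  return 0;
}
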
@@ -1030,10 +1038,11 @@
         ast->print_cr("No hotness data available");
       }
       STRINGSTREAM_FLUSH("\n")
 
       // This loop is intentionally printing directly to "out".
+      // It should not print anything, anyway.
       out->print("Verifying collected data...");
       size_t granule_segs = granule_size>>log2_seg_size;
       for (unsigned int ix = 0; ix < granules; ix++) {
         if (StatArray[ix].t1_count   > granule_segs) {
           out->print_cr("t1_count[%d]   = %d", ix, StatArray[ix].t1_count);

@@ -1073,10 +1082,11 @@
           out->print_cr("t1_space[%d] = %d, t2_space[%d] = %d, tx_space[%d] = %d, stub_space[%d] = %d", ix, StatArray[ix].t1_space, ix, StatArray[ix].t2_space, ix, StatArray[ix].tx_space, ix, StatArray[ix].stub_space);
         }
       }
 
       // This loop is intentionally printing directly to "out".
+      // It should not print anything, anyway.
       if (used_topSizeBlocks > 0) {
         unsigned int j = 0;
         if (TopSizeArray[0].len != currMax) {
           out->print_cr("currMax(%d) differs from TopSizeArray[0].len(%d)", currMax, TopSizeArray[0].len);
         }

@@ -1168,10 +1178,11 @@
   }
 
   //---<  calculate and fill remaining fields  >---
   if (FreeArray != NULL) {
     // This loop is intentionally printing directly to "out".
+    // It should not print anything, anyway.
     for (unsigned int ix = 0; ix < alloc_freeBlocks-1; ix++) {
       size_t lenSum = 0;
       FreeArray[ix].gap = (unsigned int)((address)FreeArray[ix+1].start - ((address)FreeArray[ix].start + FreeArray[ix].len));
       for (HeapBlock *h = heap->next_block(FreeArray[ix].start); (h != NULL) && (h != FreeArray[ix+1].start); h = heap->next_block(h)) {
         CodeBlob *cb  = (CodeBlob*)(heap->find_start(h));

@@ -1225,10 +1236,11 @@
   //----------------------------
   //--  Print Top Used Blocks --
   //----------------------------
   {
     char*     low_bound = heap->low_boundary();
+    bool      have_CodeCache_lock = CodeCache_lock->owned_by_self();
 
     printBox(ast, '-', "Largest Used Blocks in ", heapName);
     print_blobType_legend(ast);
 
     ast->fill_to(51);

@@ -1243,16 +1255,23 @@
     //---<  print Top Ten Used Blocks  >---
     if (used_topSizeBlocks > 0) {
       unsigned int printed_topSizeBlocks = 0;
       for (unsigned int i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
         printed_topSizeBlocks++;
-        CodeBlob*   this_blob = (CodeBlob*)(heap->find_start(TopSizeArray[i].start));
         nmethod*           nm = NULL;
-        const char* blob_name = "unnamed blob";
-        if (this_blob != NULL) {
+        const char* blob_name = "unnamed blob or blob name unavailable";
+        // heap->find_start() is safe. Only works on _segmap.
+        // Returns NULL or void*. Returned CodeBlob may be uninitialized.
+        HeapBlock* heapBlock = TopSizeArray[i].start;
+        CodeBlob*  this_blob = (CodeBlob*)(heap->find_start(heapBlock));
+        bool    blob_is_safe = blob_access_is_safe(this_blob, NULL);
+        if (blob_is_safe) {
+          //---<  access these fields only if we own the CodeCache_lock  >---
+          if (have_CodeCache_lock) {
           blob_name = this_blob->name();
           nm        = this_blob->as_nmethod_or_null();
+          }
           //---<  blob address  >---
           ast->print(INTPTR_FORMAT, p2i(this_blob));
           ast->fill_to(19);
           //---<  blob offset from CodeHeap begin  >---
           ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));

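The "find_start() is safe" comment rests on the fact that the lookup only consults the heap's segment map, a side table that maps any address inside a block back to the block's start, and never dereferences the block itself. A much simplified model of such a lookup (the table layout below is an illustrative stand-in, not the actual _segmap encoding):

#include <cstdio>

// segmap[i] == 0 marks a block start; a non-zero entry is the distance
// (in segments) to hop back toward the start. No block memory is touched.
static unsigned char segmap[8] = { 0, 1, 2, 0, 1, 0, 1, 2 };

static int find_start_segment(int seg) {
  while (segmap[seg] != 0) {
    seg -= segmap[seg];   // walk toward the block start via the side table only
  }
  return seg;
}

int main() {
  printf("segment 4 starts at segment %d\n", find_start_segment(4));  // 3
  printf("segment 7 starts at segment %d\n", find_start_segment(7));  // 5
  return 0;
}
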
@@ -1264,14 +1283,22 @@
           //---<  block offset from CodeHeap begin  >---
           ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)TopSizeArray[i].start-low_bound));
           ast->fill_to(33);
         }
 
-
         //---<  print size, name, and signature (for nMethods)  >---
-        if ((nm != NULL) && (nm->method() != NULL)) {
+        // access nmethod and Method fields only if we own the CodeCache_lock.
+        // This fact is implicitly transported via nm != NULL.
+        if (CompiledMethod::nmethod_access_is_safe(nm)) {
           ResourceMark rm;
+          Method* method = nm->method();
+          if (nm->is_in_use()) {
+            blob_name = method->name_and_sig_as_C_string();
+          }
+          if (nm->is_not_entrant()) {
+            blob_name = method->name_and_sig_as_C_string();
+          }
           //---<  nMethod size in hex  >---
           unsigned int total_size = nm->total_size();
           ast->print(PTR32_FORMAT, total_size);
           ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
           ast->fill_to(51);

@@ -1282,14 +1309,16 @@
           //---<  method temperature  >---
           ast->fill_to(67);
           ast->print("%5d", nm->hotness_counter());
           //---<  name and signature  >---
           ast->fill_to(67+6);
-          if (nm->is_in_use())        {blob_name = nm->method()->name_and_sig_as_C_string(); }
-          if (nm->is_not_entrant())   {blob_name = nm->method()->name_and_sig_as_C_string(); }
-          if (nm->is_not_installed()) {ast->print("%s", " not (yet) installed method "); }
-          if (nm->is_zombie())        {ast->print("%s", " zombie method "); }
+          if (nm->is_not_installed()) {
+            ast->print(" not (yet) installed method ");
+          }
+          if (nm->is_zombie()) {
+            ast->print(" zombie method ");
+          }
           ast->print("%s", blob_name);
         } else {
           //---<  block size in hex  >---
           ast->print(PTR32_FORMAT, (unsigned int)(TopSizeArray[i].len<<log2_seg_size));
           ast->print("(" SIZE_FORMAT_W(4) "K)", (TopSizeArray[i].len<<log2_seg_size)/K);

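The comment "this fact is implicitly transported via nm != NULL" describes a small but important design choice: the nmethod pointer is only filled in while the lock is held, so downstream code can test the pointer instead of re-testing the lock. A stripped-down illustration (MethodModel, NMethodModel, and the have_lock flag are stand-ins):

#include <cstdio>

struct MethodModel  { const char* name; };
struct NMethodModel { MethodModel* method; bool in_use; };

int main() {
  bool have_lock = false;                   // pretend we were called unlocked
  MethodModel  m    { "Foo::bar()" };
  NMethodModel cand { &m, true };

  NMethodModel* nm = NULL;
  if (have_lock) {
    nm = &cand;                             // privileged lookup only under the lock
  }
  // Downstream: no second lock check needed; NULL already encodes "not allowed".
  if (nm != NULL && nm->in_use) {
    printf("%s\n", nm->method->name);
  } else {
    printf("unnamed blob or blob name unavailable\n");
  }
  return 0;
}
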
@@ -2089,10 +2118,11 @@
 
   unsigned int granules_per_line  = 128;
   char*        low_bound          = heap->low_boundary();
   CodeBlob*    last_blob          = NULL;
   bool         name_in_addr_range = true;
+  bool         have_CodeCache_lock = CodeCache_lock->owned_by_self();
 
   //---<  print at least 128K per block (i.e. between headers)  >---
   if (granules_per_line*granule_size < 128*K) {
     granules_per_line = (unsigned int)((128*K)/granule_size);
   }

@@ -2123,34 +2153,42 @@
     // Only check granule if it contains at least one blob.
     unsigned int nBlobs  = StatArray[ix].t1_count   + StatArray[ix].t2_count + StatArray[ix].tx_count +
                            StatArray[ix].stub_count + StatArray[ix].dead_count;
     if (nBlobs > 0 ) {
     for (unsigned int is = 0; is < granule_size; is+=(unsigned int)seg_size) {
-      // heap->find_start() is safe. Only working with _segmap. Returns NULL or void*. Returned CodeBlob may be uninitialized.
-      CodeBlob* this_blob = (CodeBlob *)(heap->find_start(low_bound+ix*granule_size+is));
-      bool blob_initialized = (this_blob != NULL) && (this_blob->header_size() >= 0) && (this_blob->relocation_size() >= 0) &&
-                              ((address)this_blob + this_blob->header_size() == (address)(this_blob->relocation_begin())) &&
-                              ((address)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (address)(this_blob->content_begin())) &&
-                              os::is_readable_pointer((address)(this_blob->relocation_begin())) &&
-                              os::is_readable_pointer(this_blob->content_begin());
+      // heap->find_start() is safe. Only works on _segmap.
+      // Returns NULL or void*. Returned CodeBlob may be uninitialized.
+      char*     this_seg  = low_bound + ix*granule_size + is;
+      CodeBlob* this_blob = (CodeBlob*)(heap->find_start(this_seg));
+      bool   blob_is_safe = blob_access_is_safe(this_blob, NULL);
       // blob could have been flushed, freed, and merged.
       // this_blob < last_blob is an indicator for that.
-      if (blob_initialized && (this_blob > last_blob)) {
+      if (blob_is_safe && (this_blob > last_blob)) {
         last_blob          = this_blob;
 
         //---<  get type and name  >---
         blobType       cbType = noType;
         if (segment_granules) {
           cbType = (blobType)StatArray[ix].type;
         } else {
+          //---<  access these fields only if we own the CodeCache_lock  >---
+          if (have_CodeCache_lock) {
           cbType = get_cbType(this_blob);
         }
+        }
+
+        //---<  access these fields only if we own the CodeCache_lock  >---
+        const char* blob_name = "<unavailable>";
+        nmethod*           nm = NULL;
+        if (have_CodeCache_lock) {
+          blob_name = this_blob->name();
+          nm        = this_blob->as_nmethod_or_null();
         // this_blob->name() could return NULL if no name was given to CTOR. Inlined, maybe invisible on stack
-        const char* blob_name = this_blob->name();
         if ((blob_name == NULL) || !os::is_readable_pointer(blob_name)) {
           blob_name = "<unavailable>";
         }
+        }
 
         //---<  print table header for new print range  >---
         if (!name_in_addr_range) {
           name_in_addr_range = true;
           ast->fill_to(51);

@@ -2165,12 +2203,12 @@
         ast->print(INTPTR_FORMAT, p2i(this_blob));
         ast->fill_to(19);
         ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
         ast->fill_to(33);
 
-        // this_blob->as_nmethod_or_null() is safe. Inlined, maybe invisible on stack.
-        nmethod*    nm     = this_blob->as_nmethod_or_null();
+        // access nmethod and Method fields only if we own the CodeCache_lock.
+        // This fact is implicitly transported via nm != NULL.
         if (CompiledMethod::nmethod_access_is_safe(nm)) {
           Method* method = nm->method();
           ResourceMark rm;
           //---<  collect all data to locals as quickly as possible  >---
           unsigned int total_size = nm->total_size();

@@ -2203,18 +2241,21 @@
             ast->print("%s", methNameS);
             ast->print("%s", methSigS);
           } else {
             ast->print("%s", blob_name);
           }
-        } else {
+        } else if (blob_is_safe) {
           ast->fill_to(62+6);
           ast->print("%s", blobTypeName[cbType]);
           ast->fill_to(82+6);
           ast->print("%s", blob_name);
+        } else {
+          ast->fill_to(62+6);
+          ast->print("<stale blob>");
         }
         STRINGSTREAM_FLUSH_LOCKED("\n")
-      } else if (!blob_initialized && (this_blob != last_blob) && (this_blob != NULL)) {
+      } else if (!blob_is_safe && (this_blob != last_blob) && (this_blob != NULL)) {
         last_blob          = this_blob;
         STRINGSTREAM_FLUSH_LOCKED("\n")
       }
     }
     } // nBlobs > 0

@@ -2377,10 +2418,13 @@
     if (cb->is_safepoint_stub())              return safepointStub;
     if (cb->is_adapter_blob())                return adapterBlob;
     if (cb->is_method_handles_adapter_blob()) return mh_adapterBlob;
     if (cb->is_buffer_blob())                 return bufferBlob;
 
+    //---<  access these fields only if we own the CodeCache_lock  >---
+    // Should be ensured by caller. aggregate() and print_names() do that.
+    if (CodeCache_lock->owned_by_self()) {
     nmethod*  nm = cb->as_nmethod_or_null();
     if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb.
       if (nm->is_not_installed()) return nMethod_inconstruction;
       if (nm->is_zombie())        return nMethod_dead;
       if (nm->is_unloaded())      return nMethod_unloaded;

@@ -2388,7 +2432,19 @@
       if (nm->is_alive() && !(nm->is_not_entrant()))   return nMethod_notused;
       if (nm->is_alive())         return nMethod_alive;
       return nMethod_dead;
     }
   }
+  }
   return noType;
 }
+
+bool CodeHeapState::blob_access_is_safe(CodeBlob* this_blob, CodeBlob* prev_blob) {
+  return (this_blob != NULL) && // a blob must have been found, obviously
+         ((this_blob == prev_blob) || (prev_blob == NULL)) &&  // when re-checking, the same blob must have been found
+         (this_blob->header_size() >= 0) &&
+         (this_blob->relocation_size() >= 0) &&
+         ((address)this_blob + this_blob->header_size() == (address)(this_blob->relocation_begin())) &&
+         ((address)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (address)(this_blob->content_begin())) &&
+         os::is_readable_pointer((address)(this_blob->relocation_begin())) &&
+         os::is_readable_pointer(this_blob->content_begin());
+}
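blob_access_is_safe() accepts a blob only if its size fields are non-negative and its section pointers agree with the arithmetic the header itself implies; an uninitialized or stale header fails these checks. A self-contained model of that idea (ToyBlob stands in for CodeBlob; the align_code_offset() step and the os::is_readable_pointer() checks are omitted):

#include <cstdio>

struct ToyBlob {
  int   _header_size;        // bytes occupied by the header itself
  int   _relocation_size;    // bytes of relocation info following the header
  char* _relocation_begin;   // stored pointer; garbage if the blob is stale
  char* _content_begin;      // stored pointer; garbage if the blob is stale
};

static bool blob_access_is_safe(ToyBlob* blob, ToyBlob* prev) {
  return (blob != NULL) &&                              // a blob must have been found
         ((blob == prev) || (prev == NULL)) &&          // re-check finds the same blob
         (blob->_header_size >= 0) &&
         (blob->_relocation_size >= 0) &&
         ((char*)blob + blob->_header_size == blob->_relocation_begin) &&
         (blob->_relocation_begin + blob->_relocation_size == blob->_content_begin);
}

int main() {
  ToyBlob ok;                                           // consistent header
  ok._header_size      = (int)sizeof(ToyBlob);
  ok._relocation_size  = 16;
  ok._relocation_begin = (char*)&ok + ok._header_size;
  ok._content_begin    = ok._relocation_begin + ok._relocation_size;

  ToyBlob bad = ok;                                     // simulate stale/garbage data
  bad._header_size = -8;

  printf("ok  -> %s\n", blob_access_is_safe(&ok,  NULL) ? "safe" : "unsafe");
  printf("bad -> %s\n", blob_access_is_safe(&bad, NULL) ? "safe" : "unsafe");
  return 0;
}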