    ::memset(_spaces, 0, sizeof(_spaces));
    test_metaspace_retrieve_chunk_geometry(Metaspace::NonClassType, &_chunk_geometry);
  }

  virtual void TearDown() {
    for (int i = 0; i < NUM_PARALLEL_METASPACES; i++) {
      if (_spaces[i].space != NULL) {
        delete _spaces[i].space;
        delete _spaces[i].lock;
      }
    }
  }

  void create_space(int i) {
    assert(i >= 0 && i < NUM_PARALLEL_METASPACES, "Sanity");
    assert(_spaces[i].space == NULL && _spaces[i].allocated == 0, "Sanity");
    if (_spaces[i].lock == NULL) {
      _spaces[i].lock = new Mutex(Monitor::native, "gtest-MetaspaceAllocationTest-lock", false, Monitor::_safepoint_check_never);
      ASSERT_TRUE(_spaces[i].lock != NULL);
    }
    // Let every ~10th space be an unsafe anonymous one to test different allocation patterns.
    const Metaspace::MetaspaceType msType = (os::random() % 100 < 10) ?
      Metaspace::UnsafeAnonymousMetaspaceType : Metaspace::StandardMetaspaceType;
    {
      // Take the lock during space creation, since this is what happens in the VM too
      // (see ClassLoaderData::metaspace_non_null(), which we mimic here).
      MutexLocker ml(_spaces[i].lock, Mutex::_no_safepoint_check_flag);
      _spaces[i].space = new ClassLoaderMetaspace(_spaces[i].lock, msType);
    }
    _spaces[i].allocated = 0;
    ASSERT_TRUE(_spaces[i].space != NULL);
  }
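  // Hypothetical usage sketch, not part of the original fixture: assuming the
  // fixture class is named MetaspaceAllocationTest (as the lock name above
  // suggests), a test body would create a space via create_space() and rely on
  // TearDown() to delete the space and its lock:
  //
  //   TEST_VM_F(MetaspaceAllocationTest, create_single_space) {
  //     create_space(0);
  //     ASSERT_TRUE(_spaces[0].space != NULL);
  //   }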
  // Returns the index of a random space in [0..metaspaces) whose fill grade
  // matches fg (empty, non-empty or full).
  // Returns -1 if no matching space exists.
  enum fillgrade { fg_empty, fg_non_empty, fg_full };
  int get_random_matching_space(int metaspaces, fillgrade fg) {
    const int start_index = os::random() % metaspaces;
    int i = start_index;
    do {
      if (fg == fg_empty && _spaces[i].is_empty()) {
        return i;