// Default load factor but only 1 concurrent thread anticipated.
// This gives ~48 mill/s cache lookups
static final ConcurrentHashMap<Class, _MethodCache> getMethodCache_chm = new ConcurrentHashMap<Class, _MethodCache>(16, 0.75f, 1);

// Let's try load factor 0.5.
// This seems a bit worse - something around 42 mill/s or so.
/*static final ConcurrentHashMap<Class, _MethodCache> getMethodCache_chm = new ConcurrentHashMap<Class, _MethodCache>(16, 0.5f, 1);*/

// Let's try load factor 0.25 just for fun.
// Gives ~47 mill/s or so. So I guess the load factor of 0.75 is fine.
/*static final ConcurrentHashMap<Class, _MethodCache> getMethodCache_chm = new ConcurrentHashMap<Class, _MethodCache>(16, 0.25f, 1);*/

static _MethodCache getMethodCache_chm(Class c) {
  _MethodCache cache = getMethodCache_chm.get(c);
  if (cache == null)
    getMethodCache_chm.put(c, cache = new _MethodCache(c));
  ret cache;
}

// This one gets ~44 mill/s cache lookups which is definitely better than the old method
svoid bench_concurrentHashMapBasedMethodCache_tweaked() {
  print("Loading 1000 classes");
  L classes = map classForName(takeFirst(1000, classNamesInJigsawModule("java.base")));
  print("Have " + nClasses(classes) + ". Filling method caches");
  time {
    for (c : classes)
      assertNotNull(getMethodCache_chm(c));
  }
  print("Looking them up again.");
  benchFor10(-> {
    for (c : classes)
      assertNotNull(getMethodCache_chm(c));
  });
  print("Classes in method cache: " + l(getMethodCache_chm));
}
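
// ---------------------------------------------------------------------------
// A standalone plain-Java sketch (its own file, not part of the JavaX snippet
// above) that reproduces the lookup-throughput measurement without JavaX
// helpers. ChmMethodCacheBench, CacheStub and the hard-coded class list are
// made up for this sketch; they stand in for _MethodCache and
// classNamesInJigsawModule("java.base"). It also tries computeIfAbsent as an
// alternative to the explicit get-then-put above, which is fine for the single
// anticipated thread but can build a value twice under contention.
// ---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;

public class ChmMethodCacheBench {
  // Minimal stand-in for _MethodCache: just remembers its class.
  static final class CacheStub {
    final Class<?> owner;
    CacheStub(Class<?> owner) { this.owner = owner; }
  }

  // Same construction as above: initial capacity 16, load factor 0.75, concurrency level 1.
  static final ConcurrentHashMap<Class<?>, CacheStub> cache =
    new ConcurrentHashMap<Class<?>, CacheStub>(16, 0.75f, 1);

  // Variant 1: explicit get-then-put, mirroring getMethodCache_chm(Class) above.
  static CacheStub lookupGetPut(Class<?> c) {
    CacheStub v = cache.get(c);
    if (v == null) cache.put(c, v = new CacheStub(c));
    return v;
  }

  // Variant 2: computeIfAbsent builds each value at most once. On hits it is
  // usually comparable to a plain get, though older JDKs could lock the bin
  // even for keys that are already present.
  static CacheStub lookupComputeIfAbsent(Class<?> c) {
    return cache.computeIfAbsent(c, CacheStub::new);
  }

  public static void main(String[] args) throws Exception {
    // A handful of java.base classes as real Class keys.
    List<Class<?>> classes = new ArrayList<Class<?>>();
    for (String name : new String[] {
        "java.lang.String", "java.lang.Integer", "java.util.ArrayList",
        "java.util.HashMap", "java.util.concurrent.ConcurrentHashMap" })
      classes.add(Class.forName(name));
    for (Class<?> c : classes) lookupGetPut(c); // fill the cache once

    // 10 timed rounds per variant, reporting million lookups per second.
    for (int round = 1; round <= 10; round++) {
      long n = 0, t0 = System.nanoTime();
      for (int i = 0; i < 1_000_000; i++)
        for (Class<?> c : classes) { lookupGetPut(c); n++; }
      System.out.printf("get/put round %d: %.1f mill/s%n",
        round, n / ((System.nanoTime() - t0) / 1e9) / 1e6);
    }
    for (int round = 1; round <= 10; round++) {
      long n = 0, t0 = System.nanoTime();
      for (int i = 0; i < 1_000_000; i++)
        for (Class<?> c : classes) { lookupComputeIfAbsent(c); n++; }
      System.out.printf("computeIfAbsent round %d: %.1f mill/s%n",
        round, n / ((System.nanoTime() - t0) / 1e9) / 1e6);
    }
  }
}
// Design note: since Java 8 the concurrency level passed to the constructor is
// only a sizing hint, so keeping it at 1 matches the declaration above but has
// little effect on single-threaded lookup speed.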