/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License.
*/
/** * Support for optimal checking of time stamps of certain files in * NetBeans directory structure. * * @author Jaroslav Tulach <jaroslav.tulach@netbeans.org> * @since 2.9
*/ publicfinalclass Stamps { privatestaticfinal Logger LOG = Logger.getLogger(Stamps.class.getName()); privatestatic AtomicLong moduleJARs; privatestatic File moduleNewestFile; privatestatic File[] fallbackCache; privatestaticboolean populated; privatestaticBoolean clustersChanged;
private Worker worker = new Worker();
private Stamps() {
}
/** This class can be executed from command line to perform various checks * on installed NetBeans, however outside of running NetBeans. *
*/ staticvoid main(String... args) { if (args.length == 1 && "reset".equals(args[0])) { // NOI18N
moduleJARs = null;
Clusters.clear();
clustersChanged = null;
fallbackCache = null;
stamp(false); return;
} if (args.length == 1 && "init".equals(args[0])) { // NOI18N
moduleJARs = null;
Clusters.clear();
clustersChanged = null;
fallbackCache = null;
stamp(true); return;
} if (args.length == 1 && "clear".equals(args[0])) { // NOI18N
moduleJARs = null;
Clusters.clear();
clustersChanged = null;
fallbackCache = null; return;
}
} privatestaticfinal Stamps MODULES_JARS = new Stamps(); /** Creates instance of stamp that checks timestamp for all files that affect * module classloading and related caches.
*/ publicstatic Stamps getModulesJARs() { return MODULES_JARS;
}
/** Finds out the time of last modifications of files that influnce * this cache. Each cached file needs to be "younger". * @return time in ms since epoch
*/ publiclong lastModified() { return moduleJARs();
}
/** Checks whether a cache exists * * @param cache name of the cache * @return true if the cache exists and is not out of date
*/ publicboolean exists(String cache) { return file(cache, null) != null;
}
/** Opens the access to cache object as a stream. * @param cache name of the cache * @return stream to read from the cache or null if the cache is not valid
*/ public InputStream asStream(String cache) {
ByteBuffer bb = asByteBuffer(cache, false, false); if (bb == null) { returnnull;
} returnnew ByteArrayInputStream(bb.array());
}
/** Getter for mmapped buffer access to the cache. * @param cache the file to access * @return mmapped read only buffer
*/ public MappedByteBuffer asMappedByteBuffer(String cache) { return (MappedByteBuffer)asByteBuffer(cache, true, true);
}
/** Opens the access to cache object as a stream. * @param cache name of the cache * @return stream to read from the cache or null if the cache is not valid
*/ public ByteBuffer asByteBuffer(String cache) { return asByteBuffer(cache, true, false);
} final File file(String cache, int[] len) { if (clustersChanged()) { returnnull;
}
checkPopulateCache();
synchronized (this) { if (worker.isProcessing(cache)) {
LOG.log(Level.FINE, "Worker processing when asking for {0}", cache); // NOI18N returnnull;
}
} return fileImpl(cache, len, moduleJARs());
}
/** Method for registering updates to caches. * @param updater the callback to start when flushing caches * @param cache name of the file to store the cache into * @param append write from scratch or append?
*/ publicvoid scheduleSave(Updater updater, String cache, boolean append) { boolean firstAdd;
firstAdd = scheduleSaveImpl(updater, cache, append);
LOG.log(firstAdd ? Level.FINE : Level.FINER, "Scheduling save for {0} cache", cache
);
Clusters.scheduleSave(this);
}
/** Flushes all caches. * @param delay the delay to wait with starting the parsing, if zero, that also means * we want to wait for the end of parsing
*/ publicvoid flush(int delay) { synchronized (worker) {
worker.start(delay);
}
}
/** Waits for the worker to finish */ publicvoid shutdown() {
waitFor(true);
}
/** Computes and returns timestamp for all files that affect * module classloading and related caches. * @return
*/ staticlong moduleJARs() {
AtomicLong local = moduleJARs; if (local == null) {
local = new AtomicLong();
AtomicReference<File> newestFile = new AtomicReference<File>();
stamp(true, local, newestFile);
moduleJARs = local;
moduleNewestFile = newestFile.get();
} return local.longValue();
}
// // Implementation. As less dependecies on other NetBeans clases, as possible, please. // This will be called externally from a launcher. //
privatestatic AtomicLong stamp(boolean checkStampFile) {
AtomicLong result = new AtomicLong();
AtomicReference<File> newestFile = new AtomicReference<File>();
stamp(checkStampFile, result, newestFile); return result;
}
// NOTE(review): fragment — the header of the enclosing method (apparently the
// three-argument stamp(boolean, AtomicLong, AtomicReference<File>) overload
// called above) and the declaration of the StringBuilder `sb` are missing from
// this chunk. Several original source lines were fused onto one physical line
// during extraction; in particular, code following a "// NOI18N" comment on
// the same line is currently commented out — TODO restore line breaks.
// Purpose (from visible code): compute a timestamp per cluster directory and
// build a textual checksum (`sb`) of the installation configuration, then
// compare it with lastModified/all-checksum.txt and discard caches on mismatch.
Set<File> processedDirs = new HashSet<File>();
String[] relativeDirs = Clusters.relativeDirsWithHome();
// NOTE(review): the `if (home != null) { ... }` below is fused into the NOI18N comment
String home = System.getProperty ("netbeans.home"); // NOI18N if (home != null) { long stamp = stampForCluster (new File (home), result, newestFile, processedDirs, checkStampFile, true, null);
sb.append(relativeDirs[0]).append('=').append(stamp).append('\n');
}
// one "cluster.<dir>=<stamp>" line per configured cluster; -1 means skipped
String[] drs = Clusters.dirs(); for (int i = 0; i < drs.length; i++) { final File clusterDir = new File(drs[i]); long stamp = stampForCluster(clusterDir, result, newestFile, processedDirs, checkStampFile, true, null); if (stamp != -1) {
sb.append("cluster.").append(relativeDirs[i + 1]).append('=').append(stamp).append('\n');
}
}
// the user directory contributes both a timestamp and a name-length CRC
File user = Places.getUserDirectory(); if (user != null) {
AtomicInteger crc = new AtomicInteger();
stampForCluster(user, result, newestFile, new HashSet<File>(), false, false, crc);
sb.append("user=").append(result.longValue()).append('\n');
sb.append("crc=").append(crc.intValue()).append('\n');
sb.append("locale=").append(Locale.getDefault()).append('\n');
sb.append("branding=").append(NbBundle.getBranding()).append('\n');
sb.append("java.version=").append(System.getProperty("java.version")).append('\n');
sb.append("java.vm.version=").append(System.getProperty("java.vm.version")).append('\n'); if (BaseUtilities.isWindows()) { /* NETBEANS-1914: On Windows (but not on Linux or MacOS), the cache directory has been observed to contain absolute paths to the NetBeans install directory (netbeans.home). This can cause errors on startup if said directory is later moved. As a workaround, include the netbeans.home path among the values that will cause the cache to be invalidated if changed. (A better solution would be to get rid of the absolute paths;
but after some investigation, I could not figure out how to do this.) */
sb.append("netbeans.home=").append(home == null ? "" : home).append('\n');
}
// mismatch between the stored checksum and the freshly built one
// invalidates all caches
File checkSum = new File(Places.getCacheDirectory(), "lastModified/all-checksum.txt"); if (!compareAndUpdateFile(checkSum, sb.toString(), result)) {
discardCachesImpl(result);
}
}
}
// NOTE(review): fragment — the method header is missing from this chunk;
// judging by the recursive call below this is the body of
// highestStampForDir(File, AtomicReference<File>, AtomicLong, AtomicInteger).
// It raises `result` to the newest lastModified() found under `file` and
// records that file in `newestFile`; returns false for plain files,
// true for directories. Line breaks were lost during extraction.
File[] children = file.listFiles(); if (children == null) { if (crc != null) {
// plain file (or unreadable dir): fold the name length into the CRC
crc.addAndGet(file.getName().length());
} long time = file.lastModified(); if (time > result.longValue()) {
newestFile.set(file);
result.set(time);
} returnfalse;
}
// directory: recurse into every child
for (File f : children) {
highestStampForDir(f, newestFile, result, crc);
} returntrue;
}
privatestaticboolean compareAndUpdateFile(File file, String content, AtomicLong result) { try { byte[] expected = content.getBytes(StandardCharsets.UTF_8); byte[] read = newbyte[expected.length];
FileInputStream is = null; boolean areCachesOK; boolean writeFile; long lastMod; try {
is = new FileInputStream(file); int len = is.read(read);
areCachesOK = len == read.length && is.available() == 0 && Arrays.equals(expected, read);
writeFile = !areCachesOK;
lastMod = file.lastModified();
} catch (FileNotFoundException notFoundEx) { // ok, running for the first time, no need to invalidate the cache
areCachesOK = true;
writeFile = true;
lastMod = result.get();
} finally { if (is != null) {
is.close();
}
} if (writeFile) {
file.getParentFile().mkdirs();
FileOutputStream os = new FileOutputStream(file);
os.write(expected);
os.close(); if (areCachesOK) {
file.setLastModified(lastMod);
}
} else { if (lastMod > result.get()) {
result.set(lastMod);
}
} return areCachesOK;
} catch (IOException ex) {
ex.printStackTrace(); returnfalse;
}
}
privatestaticvoid deleteCache(File cacheFile) throws IOException { int fileCounter = 0; if (cacheFile.exists()) { // all of this mess is here because Windows can't delete mmaped file.
File tmpFile = new File(cacheFile.getParentFile(), cacheFile.getName() + "." + fileCounter++);
tmpFile.delete(); // delete any leftover file from previous session boolean renamed = false;
Random r = null; for (int i = 0; i < 10; i++) {
renamed = cacheFile.renameTo(tmpFile); // try to rename it if (renamed) { break;
}
LOG.log(Level.INFO, "cannot rename (#{0}): {1}", new Object[]{i, cacheFile}); // NOI18N // try harder
System.gc();
System.runFinalization();
LOG.info("after GC"); // NOI18N if (r == null) {
r = new Random();
} try { finalint ms = r.nextInt(1000) + 1; Thread.sleep(ms);
LOG.log(Level.INFO, "Slept {0} ms", ms);
} catch (InterruptedException ex) {
LOG.log(Level.INFO, "Interrupted", ex); // NOI18N
}
} if (!renamed) { // still delete on exit, so next start is ok
cacheFile.deleteOnExit(); thrownew IOException("Could not delete: " + cacheFile); // NOI18N
} if (!tmpFile.delete()) {
tmpFile.deleteOnExit();
} // delete now or later
}
}
// NOTE(review): fragment — the method header is missing from this chunk
// (presumably the populate-cache routine invoked via checkPopulateCache()).
// Purpose (from visible code): when the cache directory is empty, extract a
// bundled "populate.zip" into it to pre-seed the caches. Line breaks were
// lost during extraction.
// NOTE(review): `zip`, `os` and `is` are not closed on the exception path
// (only the success path closes them) — candidate for try-with-resources
// once the missing header is recovered.
File cache = Places.getCacheDirectory();
// anything already present in the cache dir means no populating is needed
String[] children = cache.list(); if (children != null && children.length > 0) { return;
}
InputStream is = Stamps.getModulesJARs().asStream("populate.zip"); // NOI18N if (is == null) { return;
}
ZipInputStream zip = null;
FileOutputStream os = null; try { byte[] arr = newbyte[4096];
LOG.log(Level.FINE, "Found populate.zip about to extract it into {0}", cache);
zip = new ZipInputStream(is); for (;;) {
ZipEntry en = zip.getNextEntry(); if (en == null) { break;
} if (en.isDirectory()) { continue;
}
// map zip entry paths onto platform-specific file paths
File f = new File(cache, en.getName().replace('/', File.separatorChar));
f.getParentFile().mkdirs();
os = new FileOutputStream(f); for (;;) { int len = zip.read(arr); if (len == -1) { break;
}
os.write(arr, 0, len);
}
os.close();
}
zip.close();
} catch (IOException ex) {
// best-effort: a failed populate only means slower first start
LOG.log(Level.INFO, "Failed to populate {0}", cache);
}
}
// Resolves the physical file for a named cache, validating its timestamp.
// NOTE(review): this method is truncated in this chunk — the final return
// statement(s) and closing brace after the len[0] assignment are missing.
// Line breaks were lost during extraction (code fused after comments).
privatestatic File fileImpl(String cache, int[] len, long moduleJARs) {
// lastModified() <= 0 means the file does not exist
File cacheFile = new File(Places.getCacheDirectory(), cache); long last = cacheFile.lastModified(); if (last <= 0) {
LOG.log(Level.FINE, "Cache does not exist when asking for {0}", cache); // NOI18N
// fall back to a secondary cache location, if one is configured
cacheFile = findFallbackCache(cache); if (cacheFile == null || (last = cacheFile.lastModified()) <= 0) { returnnull;
}
LOG.log(Level.FINE, "Found fallback cache at {0}", cacheFile);
}
// a cache older than the newest module file is stale
if (moduleJARs > last) {
LOG.log(Level.FINE, "Timestamp does not pass when asking for {0}. Newest file {1}", new Object[] { cache, moduleNewestFile }); // NOI18N returnnull;
}
// the length is later stored in an int[], so reject oversized caches
long longLen = cacheFile.length(); if (longLen > Integer.MAX_VALUE) {
LOG.log(Level.WARNING, "Cache file is too big: {0} bytes for {1}", new Object[]{longLen, cacheFile}); // NOI18N returnnull;
} if (len != null) {
len[0] = (int)longLen;
}
/** A callback interface to flush content of some cache at a suitable * point in time.
*/ publicstaticinterface Updater { /** Callback method to allow storage of the cache to a stream. * If an excetion is thrown, cache is invalidated. * * @param os the stream to write to * @throws IOException exception in case something goes wrong
*/ publicvoid flushCaches(DataOutputStream os) throws IOException;
/** Callback method to notify the caller, that * caches are successfully written.
*/ publicvoid cacheReady();
}
/** Internal structure keeping info about storages.
* NOTE(review): fragment — only the class header and fields of Store survive
* in this chunk; the statements below (deleteCache/FileOutputStream/
* DataOutputStream) belong to a method whose header is missing, presumably
* the one that flushes this storage to `cacheFile`. `cacheFile`, `os` and
* `dos` are declared outside this view — TODO recover the missing method.
*/ privatestaticfinalclass Store extends OutputStream { final Updater updater; final String cache; finalboolean append;
// rewrite from scratch: remove the old cache file first
if (!append) {
deleteCache(cacheFile);
}
cacheFile.getParentFile().mkdirs();
LOG.log(Level.FINE, "Storing cache {0}", cacheFile);
os = new FileOutputStream(cacheFile, append); //append new entries only
// buffer heavily (1 MB) — cache flushes write large amounts of data
DataOutputStream dos = new DataOutputStream(new BufferedOutputStream(this, 1024 * 1024));
public Worker() { super("Flushing caches");
storages = new LinkedList<Stamps.Store>();
processing = new HashSet<String>();
setPriority(MIN_PRIORITY);
}
publicsynchronizedvoid start(int time) { if (delay == null) {
delay = new AtomicInteger(time); super.start();
}
}
publicsynchronizedboolean addStorage(Store s) { boolean addNew = true;
processing.add(s.cache); for (Iterator<Stamps.Store> it = storages.iterator(); it.hasNext();) {
Stamps.Store store = it.next(); if (store.equals(s)) {
it.remove();
addNew = false;
}
}
storages.add(s); return addNew;
}
// Worker main loop: waits out the configured delay (interruptible in 500 ms
// slices, restarting if the delay changes), then drains the storage queue,
// flushing each store and notifying its updater when done.
// NOTE(review): fragment — this method is truncated in this chunk: the
// closing braces of run(), of Worker and of Stamps are missing, and the
// `noNotify` flag used below is declared outside this view. Line breaks
// were lost during extraction.
@Override publicvoid run() { int before = delay.get(); for (int till = before; till >= 0; till -= 500) { try { synchronized (this) {
wait(500);
}
} catch (InterruptedException ex) {
LOG.log(Level.INFO, null, ex);
// if somebody changed the delay meanwhile, stop waiting and flush now
} if (before != delay.get()) { break;
}
// cap subsequent delays at 512 ms
} if (before > 512) {
delay.compareAndSet(before, 512);
}
long time = System.currentTimeMillis();
LOG.log(Level.FINE, "Storing caches {0}", storages);
HashSet<Store> notify = new HashSet<Stamps.Store>(); for (;;) {
Store store; synchronized (this) {
store = this.storages.poll(); if (store == null) { // ready for new round of work
worker = new Worker(); break;
}
// collect successfully stored entries so their updaters get notified
} if (store.store(delay)) {
notify.add(store);
}
}
long much = System.currentTimeMillis() - time;
LOG.log(Level.FINE, "Done storing caches {0}", notify);
LOG.log(Level.FINE, "Took {0} ms", much);
processing.clear();
for (Stamps.Store store : notify) { if (!noNotify) {
store.updater.cacheReady();
}
}
LOG.log(Level.FINE, "Notified ready {0}", notify);
// NOTE(review): removed stray German website-disclaimer text ("Die
// Informationen auf dieser Webseite ...") that was accidentally appended
// to this source file during extraction; it was not part of the program.