Fix for 170542, file access by index database not using memory mapped files.
commit d09020f2e4 (parent 31f276dafa)
17 changed files with 540 additions and 268 deletions
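The diff below drops the per-chunk memory-mapped buffers: each Chunk becomes a heap ByteBuffer that is read and written explicitly through the file's FileChannel, and chunks are kept in a bounded ChunkCache with CLOCK eviction instead of relying on the garbage collector to release file mappings (the old truncate() path had to wait on a ReferenceQueue and call System.gc()). A minimal, self-contained sketch contrasting the two access patterns; this is hypothetical illustration code, not taken from the commit, and the file name and chunk size are made up:

// Hypothetical sketch (not the CDT sources): old mmap-style chunk access vs.
// the new explicit read/write into a heap buffer.
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;

public class ChunkAccessSketch {
    static final int CHUNK_SIZE = 4 * 1024;

    // Old style: each chunk is a MappedByteBuffer; the mapping is only released
    // when the buffer is garbage collected, which made truncate/close unreliable.
    static ByteBuffer mapChunk(RandomAccessFile file, int index) throws IOException {
        return file.getChannel().map(MapMode.READ_WRITE, (long) index * CHUNK_SIZE, CHUNK_SIZE);
    }

    // New style: the chunk lives on the Java heap and is read/written explicitly,
    // so the file can be truncated, closed or deleted at any time.
    static ByteBuffer readChunk(FileChannel channel, int index) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocate(CHUNK_SIZE);
        channel.read(buffer, (long) index * CHUNK_SIZE);
        return buffer;
    }

    static void writeChunk(FileChannel channel, int index, ByteBuffer buffer) throws IOException {
        buffer.position(0);
        channel.write(buffer, (long) index * CHUNK_SIZE);
    }

    public static void main(String[] args) throws IOException {
        try (RandomAccessFile file = new RandomAccessFile("sketch.db", "rw")) {
            file.write(new byte[CHUNK_SIZE]);           // header chunk
            ByteBuffer chunk = readChunk(file.getChannel(), 0);
            chunk.putInt(0, 42);                        // e.g. a version number
            writeChunk(file.getChannel(), 0, chunk);
        }
    }
}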
@@ -90,7 +90,7 @@ public class TeamSharedIndexTest extends IndexTestBase {
 private ICProject recreateProject(final String prjName) throws Exception {
 final boolean[] changed= {false};
-IElementChangedListener waitListener= new IElementChangedListener() {
+final IElementChangedListener listener = new IElementChangedListener() {
 public void elementChanged(ElementChangedEvent event) {
 synchronized (changed) {
 changed[0]= true;
@@ -98,23 +98,27 @@ public class TeamSharedIndexTest extends IndexTestBase {
 }
 }
 };
-CoreModel.getDefault().addElementChangedListener(waitListener);
 final IWorkspace workspace = ResourcesPlugin.getWorkspace();
-final IProject prjHandle= workspace.getRoot().getProject(prjName);
-workspace.run(new IWorkspaceRunnable() {
-public void run(IProgressMonitor monitor) throws CoreException {
-IProjectDescription desc= IDEWorkbenchPlugin.getPluginWorkspace().newProjectDescription(prjName);
-prjHandle.create(desc, NPM);
-prjHandle.open(0, NPM);
-}
-}, null);
-synchronized(changed) {
-if (!changed[0]) {
-changed.wait(INDEXER_WAIT_TIME);
-assertTrue(changed[0]);
+CoreModel.getDefault().addElementChangedListener(listener);
+try {
+final IProject prjHandle= workspace.getRoot().getProject(prjName);
+workspace.run(new IWorkspaceRunnable() {
+public void run(IProgressMonitor monitor) throws CoreException {
+IProjectDescription desc= IDEWorkbenchPlugin.getPluginWorkspace().newProjectDescription(prjName);
+prjHandle.create(desc, NPM);
+prjHandle.open(0, NPM);
+}
+}, null);
+synchronized(changed) {
+if (!changed[0]) {
+changed.wait(INDEXER_WAIT_TIME);
+assertTrue(changed[0]);
+}
 }
 }
-CoreModel.getDefault().removeElementChangedListener(waitListener);
+finally {
+CoreModel.getDefault().removeElementChangedListener(listener);
+}
 fPDOMManager.joinIndexer(INDEXER_WAIT_TIME, NPM);
 return CoreModel.getDefault().create(workspace.getRoot().getProject(prjName));
 }
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright (c) 2006 Symbian Software Systems and others.
+* Copyright (c) 2006, 2007 Symbian Software Systems and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
@@ -7,6 +7,7 @@
 *
 * Contributors:
 * Symbian - Initial implementation
+* Markus Schorn (Wind River Systems)
 *******************************************************************************/
 package org.eclipse.cdt.internal.pdom.tests;

@@ -22,6 +23,7 @@ import junit.framework.Test;

 import org.eclipse.cdt.core.testplugin.util.BaseTestCase;
 import org.eclipse.cdt.internal.core.pdom.db.BTree;
+import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
 import org.eclipse.cdt.internal.core.pdom.db.Database;
 import org.eclipse.cdt.internal.core.pdom.db.IBTreeComparator;
 import org.eclipse.cdt.internal.core.pdom.db.IBTreeVisitor;
@@ -50,7 +52,7 @@ public class BTreeTests extends BaseTestCase {
 // and invoke it multiple times per Junit test
 protected void init(int degree) throws Exception {
 dbFile = File.createTempFile("pdomtest", "db");
-db = new Database(dbFile);
+db = new Database(dbFile, new ChunkCache(), 0);
 rootRecord = Database.DATA_AREA;
 comparator = new BTMockRecordComparator();
 btree = new BTree(db, rootRecord, degree, comparator);
@@ -7,6 +7,7 @@
 *
 * Contributors:
 * Andrew Ferguson (Symbian) - Initial implementation
+* Markus Schorn (Wind River Systems)
 *******************************************************************************/
 package org.eclipse.cdt.internal.pdom.tests;

@@ -17,6 +18,7 @@ import java.util.Properties;
 import junit.framework.Test;

 import org.eclipse.cdt.core.testplugin.util.BaseTestCase;
+import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
 import org.eclipse.cdt.internal.core.pdom.db.DBProperties;
 import org.eclipse.cdt.internal.core.pdom.db.Database;
 import org.eclipse.core.runtime.CoreException;
@@ -35,7 +37,7 @@ public class DBPropertiesTests extends BaseTestCase {
 protected void setUp() throws Exception {
 dbLoc = File.createTempFile("test", "db");
 dbLoc.deleteOnExit();
-db = new Database(dbLoc);
+db = new Database(dbLoc, new ChunkCache(), 0);
 }

 protected void tearDown() throws Exception {
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright (c) 2005, 2006 QNX Software Systems
+* Copyright (c) 2005, 2007 QNX Software Systems
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
@@ -8,6 +8,7 @@
 * Contributors:
 * QNX Software Systems - initial API and implementation
 * Andrew Ferguson (Symbian)
+* Markus Schorn (Wind River Systems)
 *******************************************************************************/
 package org.eclipse.cdt.internal.pdom.tests;

@@ -19,6 +20,7 @@ import junit.framework.Test;
 import org.eclipse.cdt.core.testplugin.CTestPlugin;
 import org.eclipse.cdt.core.testplugin.util.BaseTestCase;
 import org.eclipse.cdt.internal.core.pdom.db.BTree;
+import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
 import org.eclipse.cdt.internal.core.pdom.db.Database;
 import org.eclipse.cdt.internal.core.pdom.db.IBTreeComparator;
 import org.eclipse.cdt.internal.core.pdom.db.IBTreeVisitor;
@@ -32,9 +34,10 @@ public class DBTest extends BaseTestCase {

 protected void setUp() throws Exception {
 super.setUp();
-db = new Database(getTestDir().append(getName()+System.currentTimeMillis()+".dat").toFile());
+db = new Database(getTestDir().append(getName()+System.currentTimeMillis()+".dat").toFile(),
+new ChunkCache(), 0);
 }

 public static Test suite() {
 return suite(DBTest.class);
 }
@@ -48,9 +51,11 @@ public class DBTest extends BaseTestCase {
 }

 protected void tearDown() throws Exception {
+db.close();
 if(!db.getLocation().delete()) {
 db.getLocation().deleteOnExit();
 }
+db= null;
 }

 public void testBlockSizeAndFirstBlock() throws Exception {
@@ -118,7 +123,7 @@ public class DBTest extends BaseTestCase {
 // Tests inserting and retrieving strings
 File f = getTestDir().append("testStrings.dat").toFile();
 f.delete();
-final Database db = new Database(f);
+final Database db = new Database(f, new ChunkCache(), 0);

 String[] names = {
 "ARLENE",
@@ -7,6 +7,7 @@
 *
 * Contributors:
 * Andrew Ferguson (Symbian) - Initial implementation
+* Markus Schorn (Wind River Systems)
 *******************************************************************************/
 package org.eclipse.cdt.internal.pdom.tests;

@@ -25,6 +26,7 @@ import org.eclipse.cdt.internal.core.index.IIndexFragment;
 import org.eclipse.cdt.internal.core.index.IWritableIndexFragment;
 import org.eclipse.cdt.internal.core.pdom.PDOM;
 import org.eclipse.cdt.internal.core.pdom.WritablePDOM;
+import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
 import org.eclipse.core.resources.IResource;
 import org.eclipse.core.runtime.NullProgressMonitor;

@@ -91,7 +93,7 @@ public class PDOMBugsTest extends BaseTestCase {
 IIndexLocationConverter cvr= new ResourceContainerRelativeLocationConverter(cproject.getProject());
 CCoreInternals.getPDOMManager().exportProjectPDOM(cproject, tmp, cvr);

-IWritableIndexFragment pdom = new WritablePDOM(tmp, cvr);
+IWritableIndexFragment pdom = new WritablePDOM(tmp, cvr, new ChunkCache());
 pdom.acquireReadLock();
 try {
 String id= pdom.getProperty(IIndexFragment.PROPERTY_FRAGMENT_ID);
@@ -1181,14 +1181,14 @@ public class CModelManager implements IResourceChangeListener, ICDescriptorListe
 // stop the binary runner for this project
 removeBinaryRunner(project);
 // stop indexing jobs for this project
-CCoreInternals.getPDOMManager().deleteProject(create(project), delta);
+CCoreInternals.getPDOMManager().deleteProject(create(project));
 }

 private void closing(IProject project, IResourceDelta delta) {
 // stop the binary runner for this project
 removeBinaryRunner(project);
 // stop indexing jobs for this project
-CCoreInternals.getPDOMManager().removeProject(create(project));
+CCoreInternals.getPDOMManager().closeProject(create(project));
 }

 }
@@ -16,6 +16,7 @@ import org.eclipse.osgi.util.NLS;
 public class Messages extends NLS {
 private static final String BUNDLE_NAME = "org.eclipse.cdt.internal.core.pdom.messages"; //$NON-NLS-1$
 public static String Checksums_taskComputeChecksums;
+public static String PDOMManager_ClosePDOMJob;
 public static String PDOMManager_ExistingFileCollides;
 public static String PDOMManager_indexMonitorDetail;
 public static String PDOMManager_JoinIndexerTask;
@@ -14,7 +14,6 @@
 package org.eclipse.cdt.internal.core.pdom;

 import java.io.File;
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.BitSet;
@@ -48,6 +47,7 @@ import org.eclipse.cdt.internal.core.index.IIndexFragmentFile;
 import org.eclipse.cdt.internal.core.index.IIndexFragmentInclude;
 import org.eclipse.cdt.internal.core.index.IIndexFragmentName;
 import org.eclipse.cdt.internal.core.pdom.db.BTree;
+import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
 import org.eclipse.cdt.internal.core.pdom.db.DBProperties;
 import org.eclipse.cdt.internal.core.pdom.db.Database;
 import org.eclipse.cdt.internal.core.pdom.db.IBTreeVisitor;
@@ -114,23 +114,22 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
 private IIndexLocationConverter locationConverter;

 public PDOM(File dbPath, IIndexLocationConverter locationConverter) throws CoreException {
-loadDatabase(dbPath);
+this(dbPath, locationConverter, ChunkCache.getSharedInstance());
+}
+
+public PDOM(File dbPath, IIndexLocationConverter locationConverter, ChunkCache cache) throws CoreException {
+loadDatabase(dbPath, cache);
 this.locationConverter = locationConverter;
 }

-private void loadDatabase(File dbPath) throws CoreException {
+private void loadDatabase(File dbPath, ChunkCache cache) throws CoreException {
 fPath= dbPath;
-boolean exists= fPath.exists();
-db = new Database(fPath);
+db = new Database(fPath, cache, VERSION);
+fileIndex= null; // holds on to the database, so clear it.

-if (exists) {
-int version= db.getVersion();
-if (version == VERSION) {
-readLinkages();
-}
-}
-else {
-db.setVersion(VERSION);
-}
+int version= db.getVersion();
+if (version == VERSION) {
+readLinkages();
 }
 }

@@ -138,9 +137,8 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
 return locationConverter;
 }

-public boolean versionMismatch() {
+public boolean versionMismatch() throws CoreException {
 if (db.getVersion() != VERSION) {
-db.setVersion(VERSION);
 return true;
 } else
 return false;
@@ -231,8 +229,8 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
 // Zero out the File Index and Linkages
 clearFileIndex();

+db.setVersion(VERSION);
 db.putInt(PROPERTIES, 0);
-
 db.putInt(LINKAGES, 0);
 fLinkageIDCache.clear();
 }
@@ -242,10 +240,10 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
 fLinkageIDCache.clear();
 try {
 db.close();
-} catch (IOException e) {
+} catch (CoreException e) {
 CCorePlugin.log(e);
 }
-loadDatabase(file);
+loadDatabase(file, db.getChunkCache());
 oldFile.delete();
 }

@@ -541,6 +539,7 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
 while (lockCount != 0 || waitingReaders > 0)
 mutex.wait();
 --lockCount;
+db.setWritable();
 }
 }

@@ -549,6 +548,11 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {
 }

 public void releaseWriteLock(int establishReadLocks) {
+try {
+db.setReadOnly();
+} catch (CoreException e) {
+CCorePlugin.log(e);
+}
 assert lockCount == -1;
 lastWriteAccess= System.currentTimeMillis();
 synchronized (mutex) {
@@ -684,5 +688,10 @@ public class PDOM extends PlatformObject implements IIndexFragment, IPDOM {

 public String getProperty(String propertyName) throws CoreException {
 return new DBProperties(db, PROPERTIES).getProperty(propertyName);
+}
+
+public void close() throws CoreException {
+fLinkageIDCache.clear();
+db.close();
 }
 }
@@ -28,6 +28,7 @@ import java.util.Map;
 import java.util.Properties;

 import org.eclipse.cdt.core.CCorePlugin;
+import org.eclipse.cdt.core.CCorePreferenceConstants;
 import org.eclipse.cdt.core.dom.IPDOM;
 import org.eclipse.cdt.core.dom.IPDOMIndexer;
 import org.eclipse.cdt.core.dom.IPDOMIndexerTask;
@@ -49,6 +50,7 @@ import org.eclipse.cdt.internal.core.index.IndexFactory;
 import org.eclipse.cdt.internal.core.index.IndexerStateEvent;
 import org.eclipse.cdt.internal.core.index.provider.IndexProviderManager;
 import org.eclipse.cdt.internal.core.pdom.PDOM.IListener;
+import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
 import org.eclipse.cdt.internal.core.pdom.dom.PDOMFile;
 import org.eclipse.cdt.internal.core.pdom.dom.PDOMProjectIndexLocationConverter;
 import org.eclipse.cdt.internal.core.pdom.indexer.IndexerPreferences;
@@ -59,7 +61,6 @@ import org.eclipse.cdt.internal.core.pdom.indexer.nulli.PDOMNullIndexer;
 import org.eclipse.core.resources.IFolder;
 import org.eclipse.core.resources.IProject;
 import org.eclipse.core.resources.IResource;
-import org.eclipse.core.resources.IResourceDelta;
 import org.eclipse.core.resources.ResourcesPlugin;
 import org.eclipse.core.runtime.CoreException;
 import org.eclipse.core.runtime.IConfigurationElement;
@@ -70,10 +71,13 @@ import org.eclipse.core.runtime.IStatus;
 import org.eclipse.core.runtime.ListenerList;
 import org.eclipse.core.runtime.OperationCanceledException;
 import org.eclipse.core.runtime.Platform;
+import org.eclipse.core.runtime.Preferences;
 import org.eclipse.core.runtime.QualifiedName;
 import org.eclipse.core.runtime.SafeRunner;
 import org.eclipse.core.runtime.Status;
 import org.eclipse.core.runtime.SubProgressMonitor;
+import org.eclipse.core.runtime.Preferences.IPropertyChangeListener;
+import org.eclipse.core.runtime.Preferences.PropertyChangeEvent;
 import org.eclipse.core.runtime.jobs.ISchedulingRule;
 import org.eclipse.core.runtime.jobs.Job;
 import org.eclipse.core.runtime.preferences.IEclipsePreferences.IPreferenceChangeListener;
@@ -154,6 +158,8 @@ public class PDOMManager implements IWritableIndexManager, IListener {
 // the model listener is attached outside of the job in
 // order to avoid a race condition where its not noticed
 // that new projects are being created
+initializeDatabaseCache();
+
 final CoreModel model = CoreModel.getDefault();
 model.addElementChangedListener(fCModelListener);

@@ -164,6 +170,32 @@ public class PDOMManager implements IWritableIndexManager, IListener {
 }
 }

+private void initializeDatabaseCache() {
+adjustCacheSize();
+CCorePlugin.getDefault().getPluginPreferences().addPropertyChangeListener(
+new IPropertyChangeListener() {
+public void propertyChange(PropertyChangeEvent event) {
+String prop= event.getProperty();
+if (prop.equals(CCorePreferenceConstants.INDEX_DB_CACHE_SIZE_PCT) ||
+prop.equals(CCorePreferenceConstants.MAX_INDEX_DB_CACHE_SIZE_MB)) {
+adjustCacheSize();
+}
+}
+}
+);
+}
+
+protected void adjustCacheSize() {
+final Preferences prefs= CCorePlugin.getDefault().getPluginPreferences();
+int cachePct= prefs.getInt(CCorePreferenceConstants.INDEX_DB_CACHE_SIZE_PCT);
+int cacheMax= prefs.getInt(CCorePreferenceConstants.MAX_INDEX_DB_CACHE_SIZE_MB);
+cachePct= Math.max(1, Math.min(50, cachePct)); // 1%-50%
+cacheMax= Math.max(1, cacheMax); // >= 1mb
+long m1= Runtime.getRuntime().maxMemory()/100L * cachePct;
+long m2= Math.min(m1, cacheMax * 1024L * 1024L);
+ChunkCache.getSharedInstance().setMaxSize(m2);
+}
+
 public IndexProviderManager getIndexProviderManager() {
 return manager;
 }
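A worked example of the sizing logic added in adjustCacheSize() above (the numbers are illustrative, not defaults taken from this commit): with a 256 MB maximum heap, INDEX_DB_CACHE_SIZE_PCT = 10 and MAX_INDEX_DB_CACHE_SIZE_MB = 64, m1 = 256 MB / 100 * 10 ≈ 25.6 MB and m2 = min(m1, 64 MB) ≈ 25.6 MB, so the shared ChunkCache is capped at roughly 25.6 MB. The clamps keep the percentage between 1% and 50% and the absolute limit at 1 MB or more.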
@@ -527,28 +559,53 @@
 }
 }
 }

-public void removeProject(ICProject project) {
+public void deleteProject(ICProject cproject) {
+removeProject(cproject, true);
+}
+
+public void closeProject(ICProject cproject) {
+removeProject(cproject, false);
+}
+

+private void removeProject(ICProject project, final boolean delete) {
 IPDOMIndexer indexer= getIndexer(project);
 if (indexer != null) {
 stopIndexer(indexer);
 }
 unregisterPreferenceListener(project);
-}
+WritablePDOM pdom= null;
+synchronized (fProjectToPDOM) {
+IProject rproject= project.getProject();
+pdom = (WritablePDOM) fProjectToPDOM.remove(rproject);
+}

-public void deleteProject(ICProject cproject, IResourceDelta delta) {
-// Project is about to be deleted. Stop all indexing tasks for it
-IPDOMIndexer indexer = getIndexer(cproject);
-if (indexer != null) {
-stopIndexer(indexer);
-}
-unregisterPreferenceListener(cproject);
-// remove entry for project from PDOM map
-synchronized (fProjectToPDOM) {
-IProject project= cproject.getProject();
-fProjectToPDOM.remove(project);
-}
+if (pdom != null) {
+final WritablePDOM finalpdom= pdom;
+Job job= new Job(Messages.PDOMManager_ClosePDOMJob) {
+protected IStatus run(IProgressMonitor monitor) {
+try {
+finalpdom.acquireWriteLock();
+try {
+finalpdom.close();
+if (delete) {
+finalpdom.getDB().getLocation().delete();
+}
+} catch (CoreException e) {
+CCorePlugin.log(e);
+}
+finally {
+finalpdom.releaseWriteLock();
+}
+} catch (InterruptedException e) {
+}
+return Status.OK_STATUS;
+}
+};
+job.setSystem(true);
+job.schedule();
+}
 }

 private void stopIndexer(IPDOMIndexer indexer) {
@@ -872,23 +929,26 @@

 // overwrite internal location representations
 final WritablePDOM newPDOM = new WritablePDOM(targetLocation, pdom.getLocationConverter());

-newPDOM.acquireWriteLock();
 try {
-List notConverted= newPDOM.rewriteLocations(newConverter);
-// remove content where converter returns null
-for(Iterator i = notConverted.iterator(); i.hasNext(); ) {
-PDOMFile file = (PDOMFile) i.next();
-file.clear();
+newPDOM.acquireWriteLock();
+try {
+List notConverted= newPDOM.rewriteLocations(newConverter);
+// remove content where converter returns null
+for(Iterator i = notConverted.iterator(); i.hasNext(); ) {
+PDOMFile file = (PDOMFile) i.next();
+file.clear();
+}

+// ensure fragment id has a sensible value, in case callee's do not
+// overwrite their own values
+String oldId= pdom.getProperty(IIndexFragment.PROPERTY_FRAGMENT_ID);
+newPDOM.setProperty(IIndexFragment.PROPERTY_FRAGMENT_ID, "exported."+oldId); //$NON-NLS-1$
+} finally {
+newPDOM.releaseWriteLock();
 }

-// ensure fragment id has a sensible value, in case callee's do not
-// overwrite their own values
-String oldId= pdom.getProperty(IIndexFragment.PROPERTY_FRAGMENT_ID);
-newPDOM.setProperty(IIndexFragment.PROPERTY_FRAGMENT_ID, "exported."+oldId); //$NON-NLS-1$
 } finally {
-newPDOM.releaseWriteLock();
+newPDOM.close();
 }
 } catch(IOException ioe) {
 throw new CoreException(CCorePlugin.createStatus(ioe.getMessage()));
@@ -96,8 +96,13 @@ public class TeamPDOMExportOperation implements IWorkspaceRunnable {

 // create checksums
 PDOM pdom= new PDOM(tmpPDOM, converter);
-monitor.setTaskName(Messages.Checksums_taskComputeChecksums);
-createChecksums(fProject, pdom, tmpChecksums, subMonitor(monitor, 94));
+try {
+monitor.setTaskName(Messages.Checksums_taskComputeChecksums);
+createChecksums(fProject, pdom, tmpChecksums, subMonitor(monitor, 94));
+}
+finally {
+pdom.close();
+}

 // create archive
 createArchive(tmpPDOM, tmpChecksums);
@@ -143,11 +148,6 @@ public class TeamPDOMExportOperation implements IWorkspaceRunnable {
 }
 finally {
 pdom.releaseReadLock();
-try {
-pdom.getDB().close();
-} catch (IOException e) {
-CCorePlugin.log(e);
-}
 }
 int i=0;
 IWorkspaceRoot root= ResourcesPlugin.getWorkspace().getRoot();
@@ -26,6 +26,7 @@ import org.eclipse.cdt.core.index.IIndexFileLocation;
 import org.eclipse.cdt.core.index.IIndexLocationConverter;
 import org.eclipse.cdt.internal.core.index.IIndexFragmentFile;
 import org.eclipse.cdt.internal.core.index.IWritableIndexFragment;
+import org.eclipse.cdt.internal.core.pdom.db.ChunkCache;
 import org.eclipse.cdt.internal.core.pdom.db.DBProperties;
 import org.eclipse.cdt.internal.core.pdom.db.IBTreeVisitor;
 import org.eclipse.cdt.internal.core.pdom.dom.PDOMBinding;
@@ -36,8 +37,13 @@ import org.eclipse.core.runtime.CoreException;
 public class WritablePDOM extends PDOM implements IWritableIndexFragment {

 public WritablePDOM(File dbPath, IIndexLocationConverter locationConverter) throws CoreException {
-super(dbPath, locationConverter);
+this(dbPath, locationConverter, ChunkCache.getSharedInstance());
 }

+public WritablePDOM(File dbPath, IIndexLocationConverter locationConverter, ChunkCache cache) throws CoreException {
+super(dbPath, locationConverter, cache);
+}
+
 public IIndexFragmentFile addFile(IIndexFileLocation location) throws CoreException {
 return super.addFile(location);
 }
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright (c) 2005, 2006 QNX Software Systems and others.
+* Copyright (c) 2005, 2007 QNX Software Systems and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
@@ -13,113 +13,111 @@
 package org.eclipse.cdt.internal.core.pdom.db;

 import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.lang.ref.ReferenceQueue;
-import java.lang.ref.WeakReference;
-import java.nio.MappedByteBuffer;
-import java.nio.channels.FileChannel.MapMode;
-import java.util.Set;
+import java.nio.ByteBuffer;

 import org.eclipse.core.runtime.CoreException;

 /**
-* @author Doug Schaefer
-*
+* Caches the content of a piece of the database.
 */
-public class Chunk {
+final class Chunk {
+final private ByteBuffer fBuffer;

-private MappedByteBuffer buffer;
+final Database fDatabase;
+final int fSequenceNumber;

-// Cache info
-private Database db;
-int index;
-
-Chunk(RandomAccessFile file, int offset) throws CoreException {
+boolean fCacheHitFlag= false;
+boolean fDirty= false;
+boolean fWritable= false;
+int fCacheIndex= -1;
+
+Chunk(Database db, int sequenceNumber) throws CoreException {
+fDatabase= db;
+fBuffer= ByteBuffer.allocate(Database.CHUNK_SIZE);
+fSequenceNumber= sequenceNumber;
 try {
-index = offset / Database.CHUNK_SIZE;
-buffer = file.getChannel().map(MapMode.READ_WRITE, offset, Database.CHUNK_SIZE);
+fDatabase.getChannel().read(fBuffer, fSequenceNumber*Database.CHUNK_SIZE);
 } catch (IOException e) {
 throw new CoreException(new DBStatus(e));
 }
 }

+void flush() throws CoreException {
+try {
+fBuffer.position(0);
+fDatabase.getChannel().write(fBuffer, fSequenceNumber*Database.CHUNK_SIZE);
+} catch (IOException e) {
+throw new CoreException(new DBStatus(e));
+}
+fDirty= false;
+}
+
 public void putByte(int offset, byte value) {
-buffer.put(offset % Database.CHUNK_SIZE, value);
+fDirty= true;
+fBuffer.put(offset % Database.CHUNK_SIZE, value);
 }

 public byte getByte(int offset) {
-return buffer.get(offset % Database.CHUNK_SIZE);
+return fBuffer.get(offset % Database.CHUNK_SIZE);
 }

 public byte[] getBytes(int offset, int length) {
 byte[] bytes = new byte[length];
-buffer.position(offset % Database.CHUNK_SIZE);
-buffer.get(bytes, 0, length);
+fBuffer.position(offset % Database.CHUNK_SIZE);
+fBuffer.get(bytes, 0, length);
 return bytes;
 }

 public void putBytes(int offset, byte[] bytes) {
-buffer.position(offset % Database.CHUNK_SIZE);
-buffer.put(bytes, 0, bytes.length);
+fDirty= true;
+fBuffer.position(offset % Database.CHUNK_SIZE);
+fBuffer.put(bytes, 0, bytes.length);
 }

 public void putInt(int offset, int value) {
-buffer.putInt(offset % Database.CHUNK_SIZE, value);
+fDirty= true;
+fBuffer.putInt(offset % Database.CHUNK_SIZE, value);
 }

 public int getInt(int offset) {
-return buffer.getInt(offset % Database.CHUNK_SIZE);
+return fBuffer.getInt(offset % Database.CHUNK_SIZE);
 }

 public void putShort(int offset, short value) {
-buffer.putShort(offset % Database.CHUNK_SIZE, value);
+fDirty= true;
+fBuffer.putShort(offset % Database.CHUNK_SIZE, value);
 }

 public short getShort(int offset) {
-return buffer.getShort(offset % Database.CHUNK_SIZE);
+return fBuffer.getShort(offset % Database.CHUNK_SIZE);
 }

 public long getLong(int offset) {
-return buffer.getLong(offset % Database.CHUNK_SIZE);
+return fBuffer.getLong(offset % Database.CHUNK_SIZE);
 }

 public void putLong(int offset, long value) {
-buffer.putLong(offset % Database.CHUNK_SIZE, value);
+fDirty= true;
+fBuffer.putLong(offset % Database.CHUNK_SIZE, value);
 }

 public void putChar(int offset, char value) {
-buffer.putChar(offset % Database.CHUNK_SIZE, value);
+fDirty= true;
+fBuffer.putChar(offset % Database.CHUNK_SIZE, value);
 }

 public char getChar(int offset) {
-return buffer.getChar(offset % Database.CHUNK_SIZE);
+return fBuffer.getChar(offset % Database.CHUNK_SIZE);
 }

 public void getCharArray(int offset, char[] result) {
-buffer.position(offset % Database.CHUNK_SIZE);
-buffer.asCharBuffer().get(result);
+fBuffer.position(offset % Database.CHUNK_SIZE);
+fBuffer.asCharBuffer().get(result);
 }

 void clear(int offset, int length) {
-buffer.position(offset % Database.CHUNK_SIZE);
-buffer.put(new byte[length]);
-}
-
-/**
-* Allow this Chunk to be reclaimed. Objects allocated by thus Chunk
-* may be registered with a ReferenceQueue to allow for notification
-* on deallocation. References registered with the queue are added to
-* the Set references.
-*
-* @param queue ReferenceQueue to register allocated objects with, or
-* null if notification is not required.
-* @param references Populated with references which were registered
-* with the queue.
-*/
-void reclaim(ReferenceQueue queue, Set references) {
-if (queue != null) {
-references.add(new WeakReference(buffer, queue));
-}
-buffer = null;
+fDirty= true;
+fBuffer.position(offset % Database.CHUNK_SIZE);
+fBuffer.put(new byte[length]);
 }
 }
@@ -0,0 +1,134 @@
+/*******************************************************************************
+* Copyright (c) 2007 Wind River Systems, Inc. and others.
+* All rights reserved. This program and the accompanying materials
+* are made available under the terms of the Eclipse Public License v1.0
+* which accompanies this distribution, and is available at
+* http://www.eclipse.org/legal/epl-v10.html
+*
+* Contributors:
+* Markus Schorn - initial API and implementation
+*******************************************************************************/
+
+package org.eclipse.cdt.internal.core.pdom.db;
+
+public final class ChunkCache {
+private static ChunkCache sSharedInstance= new ChunkCache();
+
+private Chunk[] fPageTable;
+private boolean fTableIsFull= false;
+private int fPointer= 0;
+
+public static ChunkCache getSharedInstance() {
+return sSharedInstance;
+}
+
+public ChunkCache() {
+this(5*1024*1024);
+}
+
+public ChunkCache(long maxSize) {
+fPageTable= new Chunk[computeLength(maxSize)];
+}
+
+public synchronized void add(Chunk chunk, boolean writable) {
+if (writable) {
+chunk.fWritable= true;
+}
+if (chunk.fCacheIndex >= 0) {
+chunk.fCacheHitFlag= true;
+return;
+}
+if (fTableIsFull) {
+evictChunk();
+chunk.fCacheIndex= fPointer;
+fPageTable[fPointer]= chunk;
+}
+else {
+chunk.fCacheIndex= fPointer;
+fPageTable[fPointer]= chunk;
+
+fPointer++;
+if (fPointer == fPageTable.length) {
+fPointer= 0;
+fTableIsFull= true;
+}
+}
+}
+
+/**
+* Evicts a chunk from the page table and the chunk table.
+* After this method returns, {@link #fPointer} will contain
+* the index of the evicted chunk within the page table.
+*/
+private void evictChunk() {
+/*
+* Use the CLOCK algorithm to determine which chunk to evict.
+* i.e., if the chunk in the current slot of the page table has been
+* recently referenced (i.e. the reference flag is set), unset the
+* reference flag and move to the next slot. Otherwise, evict the
+* chunk in the current slot.
+*/
+while (true) {
+Chunk chunk = fPageTable[fPointer];
+if (chunk.fCacheHitFlag) {
+chunk.fCacheHitFlag= false;
+fPointer= (fPointer + 1) % fPageTable.length;
+} else {
+chunk.fDatabase.releaseChunk(chunk);
+chunk.fCacheIndex= -1;
+fPageTable[fPointer] = null;
+return;
+}
+}
+}
+
+public synchronized void remove(Chunk chunk) {
+final int idx= chunk.fCacheIndex;
+if (idx >= 0) {
+if (fTableIsFull) {
+fPointer= fPageTable.length-1;
+fTableIsFull= false;
+}
+else {
+fPointer--;
+}
+chunk.fCacheIndex= -1;
+final Chunk move= fPageTable[fPointer];
+fPageTable[idx]= move;
+move.fCacheIndex= idx;
+fPageTable[fPointer]= null;
+}
+}
+
+/**
+* Clears the page table and changes it to hold chunks with
+* maximum total memory of <code>maxSize</code>.
+* @param maxSize the total size of the chunks in bytes.
+*/
+public synchronized void setMaxSize(long maxSize) {
+final int newLength= computeLength(maxSize);
+final int oldLength= fTableIsFull ? fPageTable.length : fPointer;
+if (newLength > oldLength) {
+Chunk[] newTable= new Chunk[newLength];
+System.arraycopy(fPageTable, 0, newTable, 0, oldLength);
+fTableIsFull= false;
+fPointer= oldLength;
+}
+else {
+for (int i=newLength; i<oldLength; i++) {
+final Chunk chunk= fPageTable[i];
+chunk.fDatabase.releaseChunk(chunk);
+chunk.fCacheIndex= -1;
+}
+Chunk[] newTable= new Chunk[newLength];
+System.arraycopy(fPageTable, 0, newTable, 0, newLength);
+fTableIsFull= true;
+fPointer= 0;
+}
+}
+
+private int computeLength(long maxSize) {
+long maxLength= Math.min(maxSize/Database.CHUNK_SIZE, Integer.MAX_VALUE);
+return Math.max(1, (int)maxLength);
+}
+}
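The new ChunkCache evicts entries with the CLOCK ("second chance") policy described in the comment inside evictChunk() above. Below is a small, self-contained illustration of that policy; this is hypothetical code, caching strings rather than Chunk objects, and it leaves out the writable/dirty bookkeeping and the Database callbacks of the real class:

// Hypothetical stand-alone sketch of the CLOCK / second-chance eviction policy.
final class ClockCacheSketch {
    private final String[] slots;
    private final boolean[] referenced;
    private int pointer = 0;
    private boolean full = false;

    ClockCacheSketch(int capacity) {
        slots = new String[capacity];
        referenced = new boolean[capacity];
    }

    void add(String value) {
        if (full) {
            evict();                     // leaves 'pointer' at the freed slot
            slots[pointer] = value;
            referenced[pointer] = false; // new entry earns its bit on the next hit
        } else {
            slots[pointer] = value;
            referenced[pointer] = false;
            pointer++;
            if (pointer == slots.length) {
                pointer = 0;
                full = true;
            }
        }
    }

    private void evict() {
        // Entries whose reference bit is set are spared once; the first entry
        // with a clear bit is evicted.
        while (referenced[pointer]) {
            referenced[pointer] = false;
            pointer = (pointer + 1) % slots.length;
        }
        slots[pointer] = null;
    }

    boolean touch(String value) {
        for (int i = 0; i < slots.length; i++) {
            if (value.equals(slots[i])) {
                referenced[i] = true;    // cache hit: set the reference bit
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        ClockCacheSketch cache = new ClockCacheSketch(3);
        cache.add("a"); cache.add("b"); cache.add("c");
        cache.touch("a");                // "a" survives the next eviction sweep
        cache.add("d");                  // evicts "b"; "a" keeps its second chance
    }
}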
@ -1,5 +1,5 @@
|
||||||
/*******************************************************************************
|
/*******************************************************************************
|
||||||
* Copyright (c) 2005, 2006 QNX Software Systems and others.
|
* Copyright (c) 2005, 2007 QNX Software Systems and others.
|
||||||
* All rights reserved. This program and the accompanying materials
|
* All rights reserved. This program and the accompanying materials
|
||||||
* are made available under the terms of the Eclipse Public License v1.0
|
* are made available under the terms of the Eclipse Public License v1.0
|
||||||
* which accompanies this distribution, and is available at
|
* which accompanies this distribution, and is available at
|
||||||
|
@ -16,9 +16,9 @@ package org.eclipse.cdt.internal.core.pdom.db;
|
||||||
import java.io.File;
|
import java.io.File;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.io.RandomAccessFile;
|
import java.io.RandomAccessFile;
|
||||||
import java.lang.ref.ReferenceQueue;
|
import java.nio.channels.FileChannel;
|
||||||
import java.util.HashSet;
|
import java.util.ArrayList;
|
||||||
import java.util.Set;
|
import java.util.Iterator;
|
||||||
|
|
||||||
import org.eclipse.cdt.core.CCorePlugin;
|
import org.eclipse.cdt.core.CCorePlugin;
|
||||||
import org.eclipse.core.runtime.CoreException;
|
import org.eclipse.core.runtime.CoreException;
|
||||||
|
@ -61,10 +61,12 @@ public class Database {
|
||||||
|
|
||||||
private final File location;
|
private final File location;
|
||||||
private final RandomAccessFile file;
|
private final RandomAccessFile file;
|
||||||
Chunk[] toc;
|
private boolean fWritable= false;
|
||||||
|
private Chunk[] chunks;
|
||||||
|
|
||||||
private long malloced;
|
private long malloced;
|
||||||
private long freed;
|
private long freed;
|
||||||
|
private ChunkCache fCache;
|
||||||
|
|
||||||
// public for tests only, you shouldn't need these
|
// public for tests only, you shouldn't need these
|
||||||
public static final int VERSION_OFFSET = 0;
|
public static final int VERSION_OFFSET = 0;
|
||||||
|
@ -78,32 +80,36 @@ public class Database {
|
||||||
|
|
||||||
public static final int MAX_SIZE = CHUNK_SIZE - 4; // Room for overhead
|
public static final int MAX_SIZE = CHUNK_SIZE - 4; // Room for overhead
|
||||||
|
|
||||||
public Database(File location) throws CoreException {
|
public Database(File location, ChunkCache cache, int version) throws CoreException {
|
||||||
try {
|
try {
|
||||||
this.location = location;
|
this.location = location;
|
||||||
this.file = new RandomAccessFile(location, "rw"); //$NON-NLS-1$
|
this.file = new RandomAccessFile(location, "rw"); //$NON-NLS-1$
|
||||||
|
fCache= cache;
|
||||||
|
|
||||||
// Allocate chunk table, make sure we have at least one
|
// Allocate chunk table, make sure we have at least one
|
||||||
long nChunks = file.length() / CHUNK_SIZE;
|
long nChunks = file.length() / CHUNK_SIZE;
|
||||||
|
chunks = new Chunk[(int)nChunks];
|
||||||
if (nChunks == 0) {
|
if (nChunks == 0) {
|
||||||
file.seek(0);
|
setWritable();
|
||||||
file.write(new byte[CHUNK_SIZE]); // the header chunk
|
createNewChunk();
|
||||||
++nChunks;
|
setVersion(version);
|
||||||
|
setReadOnly();
|
||||||
}
|
}
|
||||||
|
|
||||||
toc = new Chunk[(int)nChunks];
|
|
||||||
toc[0] = new Chunk(file, 0);
|
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
throw new CoreException(new DBStatus(e));
|
throw new CoreException(new DBStatus(e));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public int getVersion() {
|
public FileChannel getChannel() {
|
||||||
return toc[0].getInt(0);
|
return file.getChannel();
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setVersion(int version) {
|
public int getVersion() throws CoreException {
|
||||||
toc[0].putInt(0, version);
|
return getChunk(0).getInt(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setVersion(int version) throws CoreException {
|
||||||
|
getChunk(0).putInt(0, version);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -111,93 +117,66 @@ public class Database {
|
||||||
* @throws CoreException
|
* @throws CoreException
|
||||||
*/
|
*/
|
||||||
public void clear(long timeout) throws CoreException {
|
public void clear(long timeout) throws CoreException {
|
||||||
// Clear out the memory headers
|
int version= getVersion();
|
||||||
toc[0].clear(4, DATA_AREA - 4);
|
removeChunksFromCache();
|
||||||
|
|
||||||
if (!truncate(timeout)) {
|
// clear out memory headers
|
||||||
// Truncation timed out so the database size couldn't be changed.
|
Chunk header= getChunk(0);
|
||||||
// The best we can do is mark all chunks as unallocated blocks.
|
setVersion(version);
|
||||||
|
header.clear(4, DATA_AREA - 4);
|
||||||
|
chunks = new Chunk[] {header};
|
||||||
|
|
||||||
// Since the block list grows at the head, add all non-header
|
try {
|
||||||
// chunks backwards to ensure list of blocks is ordered first
|
getChannel().truncate(CHUNK_SIZE);
|
||||||
// to last.
|
}
|
||||||
for (int block = (toc.length - 1) * CHUNK_SIZE; block > 0; block -= CHUNK_SIZE) {
|
catch (IOException e) {
|
||||||
addBlock(getChunk(block), CHUNK_SIZE, block);
|
CCorePlugin.log(e);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
malloced = freed = 0;
|
malloced = freed = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
private void removeChunksFromCache() {
|
||||||
* Truncate the database as small as possible to reclaim disk space.
|
synchronized (fCache) {
|
||||||
* This method returns false if truncation does not succeed within the
|
for (int i = 0; i < chunks.length; i++) {
|
||||||
* given timeout period (in milliseconds). A timeout of 0 will cause
|
Chunk chunk= chunks[i];
|
||||||
* this method to block until the database is successfully truncated.
|
if (chunk != null) {
|
||||||
*
|
fCache.remove(chunk);
|
||||||
* @param timeout maximum amount of milliseconds to wait before giving up;
|
chunks[i]= null;
|
||||||
* 0 means wait indefinitely.
|
}
|
||||||
* @return true if truncation succeeds; false if the operation times out.
|
|
||||||
* @throws CoreException if an IO error occurs during truncation
|
|
||||||
*/
|
|
||||||
private boolean truncate(long timeout) throws CoreException {
|
|
||||||
// Queue all the chunks to be reclaimed.
|
|
||||||
ReferenceQueue queue = new ReferenceQueue();
|
|
||||||
Set references = new HashSet();
|
|
||||||
int totalChunks = toc.length;
|
|
||||||
for (int i = 0; i < toc.length; i++) {
|
|
||||||
if (toc[i] != null) {
|
|
||||||
toc[i].reclaim(queue, references);
|
|
||||||
toc[i] = null;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
System.gc();
|
|
||||||
try {
|
|
||||||
// Wait for each chunk to be reclaimed.
|
|
||||||
int totalReclaimed = references.size();
|
|
||||||
while (totalReclaimed > 0) {
|
|
||||||
queue.remove(timeout);
|
|
||||||
totalReclaimed--;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Truncate everything but the header chunk.
|
|
||||||
try {
|
|
||||||
file.getChannel().truncate(CHUNK_SIZE);
|
|
||||||
// Reinitialize header chunk.
|
|
||||||
toc = new Chunk[] { new Chunk(file, 0) };
|
|
||||||
return true;
|
|
||||||
} catch (IOException e) {
|
|
||||||
// Bug 168420:
|
|
||||||
// Truncation failed so we'll reuse the existing
|
|
||||||
// file.
|
|
||||||
toc = new Chunk[totalChunks];
|
|
||||||
toc[0] = new Chunk(file, 0);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
catch (InterruptedException e) {
|
|
||||||
// Truncation took longer than we wanted, so we'll
|
|
||||||
// reinitialize the header chunk and leave the file
|
|
||||||
// size alone.
|
|
||||||
toc[0] = new Chunk(file, 0);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return the Chunk that contains the given offset.
|
* Return the Chunk that contains the given offset.
|
||||||
*
|
* @throws CoreException
|
||||||
* @param offset
|
|
||||||
* @return
|
|
||||||
*/
|
*/
|
||||||
public Chunk getChunk(int offset) throws CoreException {
|
public Chunk getChunk(int offset) throws CoreException {
|
||||||
int index = offset / CHUNK_SIZE;
|
int index = offset / CHUNK_SIZE;
|
||||||
Chunk chunk = toc[index];
|
|
||||||
if (chunk == null) {
|
// for performance reasons try to find chunk and mark it without
|
||||||
chunk = toc[index] = new Chunk(file, index * CHUNK_SIZE);
|
// synchronizing. This means that we might pick up a chunk that
|
||||||
|
// has been paged out, which is ok.
|
||||||
|
// Furthermore the hitflag may not be seen by the clock-alorithm,
|
||||||
|
// which might lead to the eviction of a chunk. With the next
|
||||||
|
// cache failure we are in sync again, though.
|
||||||
|
Chunk chunk = chunks[index];
|
||||||
|
if (chunk != null && chunk.fWritable == fWritable) {
|
||||||
|
chunk.fCacheHitFlag= true;
|
||||||
|
return chunk;
|
||||||
}
|
}
|
||||||
|
|
||||||
return chunk;
|
// here is the safe code that has to be performed if we cannot
|
||||||
|
// get hold of the chunk.
|
||||||
|
synchronized(fCache) {
|
||||||
|
chunk= chunks[index];
|
||||||
|
if (chunk == null) {
|
||||||
|
chunk = chunks[index] = new Chunk(this, index);
|
||||||
|
}
|
||||||
|
fCache.add(chunk, fWritable);
|
||||||
|
return chunk;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
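The comments in the new getChunk() describe the caching contract: the unsynchronized fast path only sets a hit flag, and a clock (second-chance) policy in the cache evicts chunks whose flag has not been set since the last sweep. The ChunkCache itself is not part of this hunk, so the following is only a minimal sketch of such a policy; the class and field names (SimpleClockCache, hand, hit) are invented for illustration and are not the CDT implementation.

// Minimal second-chance ("clock") cache sketch; hypothetical, not the CDT ChunkCache.
class SimpleClockCache {
	private final Object[] slots;     // cached entries
	private final boolean[] hit;      // set on access, cleared as the hand sweeps past
	private int hand;                 // current position of the clock hand

	SimpleClockCache(int capacity) {
		slots = new Object[capacity];
		hit = new boolean[capacity];
	}

	// Records a hit; plays the role of setting fCacheHitFlag on the fast path.
	synchronized void touch(int slot) {
		hit[slot] = true;
	}

	// Inserts a value, evicting the first entry whose hit flag is already clear.
	synchronized int add(Object value) {
		while (true) {
			if (slots[hand] == null || !hit[hand]) {
				slots[hand] = value;                  // fill or evict this slot
				hit[hand] = true;
				int chosen = hand;
				hand = (hand + 1) % slots.length;
				return chosen;
			}
			hit[hand] = false;                        // second chance
			hand = (hand + 1) % slots.length;
		}
	}
}

A hit flag set without synchronization can at worst be missed for one sweep, which is why the lock-free fast path above it is acceptable.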
/**
|
/**
|
||||||
|
@ -229,11 +208,10 @@ public class Database {
|
||||||
// get the block
|
// get the block
|
||||||
Chunk chunk;
|
Chunk chunk;
|
||||||
if (freeblock == 0) {
|
if (freeblock == 0) {
|
||||||
// Out of memory, allocate a new chunk
|
// allocate a new chunk
|
||||||
int i = createChunk();
|
freeblock= createNewChunk();
|
||||||
chunk = toc[i];
|
|
||||||
freeblock = i * CHUNK_SIZE;
|
|
||||||
blocksize = CHUNK_SIZE;
|
blocksize = CHUNK_SIZE;
|
||||||
|
chunk = getChunk(freeblock);
|
||||||
} else {
|
} else {
|
||||||
chunk = getChunk(freeblock);
|
chunk = getChunk(freeblock);
|
||||||
removeBlock(chunk, blocksize, freeblock);
|
removeBlock(chunk, blocksize, freeblock);
|
||||||
|
@ -254,28 +232,27 @@ public class Database {
|
||||||
return freeblock + 4;
|
return freeblock + 4;
|
||||||
}
|
}
|
||||||
|
|
||||||
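The allocation path above returns freeblock + 4, which indicates that each block starts with a small header before the caller-visible payload; since the free lists are keyed by blocksize, the header plausibly records the block size. The sketch below only illustrates that arithmetic; the constant values and all names are assumptions, not taken from this hunk.

// Hypothetical illustration of the block layout implied by "return freeblock + 4":
//   [ 4-byte header ][ payload ... ]
final class BlockMath {
	static final int INT_SIZE = 4;     // size of the per-block header
	static final int MIN_SIZE = 16;    // assumed allocation granularity

	// Rounds a requested payload up to a whole block, header included.
	static int blockSizeFor(int payloadBytes) {
		int needed = payloadBytes + INT_SIZE;
		return ((needed + MIN_SIZE - 1) / MIN_SIZE) * MIN_SIZE;
	}

	// The address handed back to the caller skips the header.
	static int payloadAddress(int blockAddress) {
		return blockAddress + INT_SIZE;
	}

	public static void main(String[] args) {
		System.out.println(blockSizeFor(10));      // 16
		System.out.println(blockSizeFor(13));      // 32: 13 bytes + 4-byte header > 16
		System.out.println(payloadAddress(4096));  // 4100
	}
}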
private int createChunk() throws CoreException {
|
private int createNewChunk() throws CoreException {
|
||||||
try {
|
try {
|
||||||
Chunk[] oldtoc = toc;
|
Chunk[] oldtoc = chunks;
|
||||||
int n = oldtoc.length;
|
int n = oldtoc.length;
|
||||||
int offset = n * CHUNK_SIZE;
|
int offset = n * CHUNK_SIZE;
|
||||||
file.seek(offset);
|
file.seek(offset);
|
||||||
file.write(new byte[CHUNK_SIZE]);
|
file.write(new byte[CHUNK_SIZE]);
|
||||||
toc = new Chunk[n + 1];
|
chunks = new Chunk[n + 1];
|
||||||
System.arraycopy(oldtoc, 0, toc, 0, n);
|
System.arraycopy(oldtoc, 0, chunks, 0, n);
|
||||||
toc[n] = new Chunk(file, offset);
|
return offset;
|
||||||
return n;
|
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
throw new CoreException(new DBStatus(e));
|
throw new CoreException(new DBStatus(e));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private int getFirstBlock(int blocksize) {
|
private int getFirstBlock(int blocksize) throws CoreException {
|
||||||
return toc[0].getInt((blocksize / MIN_SIZE) * INT_SIZE);
|
return getChunk(0).getInt((blocksize / MIN_SIZE) * INT_SIZE);
|
||||||
}
|
}
|
||||||
|
|
||||||
private void setFirstBlock(int blocksize, int block) {
|
private void setFirstBlock(int blocksize, int block) throws CoreException {
|
||||||
toc[0].putInt((blocksize / MIN_SIZE) * INT_SIZE, block);
|
getChunk(0).putInt((blocksize / MIN_SIZE) * INT_SIZE, block);
|
||||||
}
|
}
|
||||||
|
|
||||||
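Both versions of getFirstBlock()/setFirstBlock() treat the start of chunk 0 as a table of free-list heads, one int per block size: a block of size n * MIN_SIZE has its list head at byte offset n * INT_SIZE. A small in-memory model of that table follows; the constant values are assumed for illustration only.

// Hypothetical model of the free-list table kept at the start of chunk 0.
// Each entry holds the database offset of the first free block of that size
// (0 meaning "no free block of this size"). Constants are assumed values.
final class FreeTableModel {
	static final int INT_SIZE = 4;
	static final int MIN_SIZE = 16;
	static final int CHUNK_SIZE = 4096;

	// Stand-in for chunk 0; an int[] index replaces the byte offset
	// (blocksize / MIN_SIZE) * INT_SIZE used in the real code.
	private final int[] header = new int[CHUNK_SIZE / INT_SIZE];

	int getFirstBlock(int blocksize) {
		return header[blocksize / MIN_SIZE];
	}

	void setFirstBlock(int blocksize, int block) {
		header[blocksize / MIN_SIZE] = block;
	}
}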
private void removeBlock(Chunk chunk, int blocksize, int block) throws CoreException {
|
private void removeBlock(Chunk chunk, int blocksize, int block) throws CoreException {
|
||||||
|
@ -321,53 +298,43 @@ public class Database {
|
||||||
}
|
}
|
||||||
|
|
||||||
public void putByte(int offset, byte value) throws CoreException {
|
public void putByte(int offset, byte value) throws CoreException {
|
||||||
Chunk chunk = getChunk(offset);
|
getChunk(offset).putByte(offset, value);
|
||||||
chunk.putByte(offset, value);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public byte getByte(int offset) throws CoreException {
|
public byte getByte(int offset) throws CoreException {
|
||||||
Chunk chunk = getChunk(offset);
|
return getChunk(offset).getByte(offset);
|
||||||
return chunk.getByte(offset);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public void putInt(int offset, int value) throws CoreException {
|
public void putInt(int offset, int value) throws CoreException {
|
||||||
Chunk chunk = getChunk(offset);
|
getChunk(offset).putInt(offset, value);
|
||||||
chunk.putInt(offset, value);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public int getInt(int offset) throws CoreException {
|
public int getInt(int offset) throws CoreException {
|
||||||
Chunk chunk = getChunk(offset);
|
return getChunk(offset).getInt(offset);
|
||||||
return chunk.getInt(offset);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public void putShort(int offset, short value) throws CoreException {
|
public void putShort(int offset, short value) throws CoreException {
|
||||||
Chunk chunk = getChunk(offset);
|
getChunk(offset).putShort(offset, value);
|
||||||
chunk.putShort(offset, value);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public short getShort(int offset) throws CoreException {
|
public short getShort(int offset) throws CoreException {
|
||||||
Chunk chunk = getChunk(offset);
|
return getChunk(offset).getShort(offset);
|
||||||
return chunk.getShort(offset);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public void putLong(int offset, long value) throws CoreException {
|
public void putLong(int offset, long value) throws CoreException {
|
||||||
Chunk chunk= getChunk(offset);
|
getChunk(offset).putLong(offset, value);
|
||||||
chunk.putLong(offset, value);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public long getLong(int offset) throws CoreException {
|
public long getLong(int offset) throws CoreException {
|
||||||
Chunk chunk = getChunk(offset);
|
return getChunk(offset).getLong(offset);
|
||||||
return chunk.getLong(offset);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public void putChar(int offset, char value) throws CoreException {
|
public void putChar(int offset, char value) throws CoreException {
|
||||||
Chunk chunk = getChunk(offset);
|
getChunk(offset).putChar(offset, value);
|
||||||
chunk.putChar(offset, value);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public char getChar(int offset) throws CoreException {
|
public char getChar(int offset) throws CoreException {
|
||||||
Chunk chunk = getChunk(offset);
|
return getChunk(offset).getChar(offset);
|
||||||
return chunk.getChar(offset);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
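All of the typed accessors above delegate to getChunk(offset) and then pass the same database-global offset to the chunk, so the chunk has to translate it into a position inside its own buffer. The helper below only sketches that decomposition; the CHUNK_SIZE value is assumed and the real Chunk class is not shown in this hunk.

// Hypothetical helper: a global database offset splits into a chunk index
// (which entry of chunks[]) and a position within that chunk's buffer.
final class OffsetMath {
	static final int CHUNK_SIZE = 4096;   // assumed value

	static int chunkIndex(int offset) {
		return offset / CHUNK_SIZE;
	}

	static int offsetInChunk(int offset) {
		return offset % CHUNK_SIZE;
	}

	public static void main(String[] args) {
		int offset = 3 * CHUNK_SIZE + 20;
		System.out.println(chunkIndex(offset));     // 3
		System.out.println(offsetInChunk(offset));  // 20
	}
}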
public IString newString(String string) throws CoreException {
|
public IString newString(String string) throws CoreException {
|
||||||
|
@ -392,15 +359,15 @@ public class Database {
|
||||||
return new ShortString(this, offset);
|
return new ShortString(this, offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
public int getNumChunks() {
|
public int getChunkCount() {
|
||||||
return toc.length;
|
return chunks.length;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void reportFreeBlocks() throws CoreException {
|
public void reportFreeBlocks() throws CoreException {
|
||||||
System.out.println("Allocated size: " + toc.length * CHUNK_SIZE); //$NON-NLS-1$
|
System.out.println("Allocated size: " + chunks.length * CHUNK_SIZE); //$NON-NLS-1$
|
||||||
System.out.println("malloc'ed: " + malloced); //$NON-NLS-1$
|
System.out.println("malloc'ed: " + malloced); //$NON-NLS-1$
|
||||||
System.out.println("free'd: " + freed); //$NON-NLS-1$
|
System.out.println("free'd: " + freed); //$NON-NLS-1$
|
||||||
System.out.println("wasted: " + (toc.length * CHUNK_SIZE - (malloced - freed))); //$NON-NLS-1$
|
System.out.println("wasted: " + (chunks.length * CHUNK_SIZE - (malloced - freed))); //$NON-NLS-1$
|
||||||
System.out.println("Free blocks"); //$NON-NLS-1$
|
System.out.println("Free blocks"); //$NON-NLS-1$
|
||||||
for (int bs = MIN_SIZE; bs <= CHUNK_SIZE; bs += MIN_SIZE) {
|
for (int bs = MIN_SIZE; bs <= CHUNK_SIZE; bs += MIN_SIZE) {
|
||||||
int count = 0;
|
int count = 0;
|
||||||
|
@ -413,15 +380,23 @@ public class Database {
|
||||||
System.out.println("Block size: " + bs + "=" + count); //$NON-NLS-1$ //$NON-NLS-2$
|
System.out.println("Block size: " + bs + "=" + count); //$NON-NLS-1$ //$NON-NLS-2$
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Closes the database, releasing the file lock. This is public for testing purposes only.
|
* Closes the database.
|
||||||
* <p>
|
* <p>
|
||||||
* The behaviour of any further calls to the Database is undefined
|
* The behaviour of any further calls to the Database is undefined
|
||||||
* @throws IOException
|
* @throws IOException
|
||||||
|
* @throws CoreException
|
||||||
*/
|
*/
|
||||||
public void close() throws IOException {
|
public void close() throws CoreException {
|
||||||
file.close();
|
setReadOnly();
|
||||||
|
removeChunksFromCache();
|
||||||
|
chunks= new Chunk[0];
|
||||||
|
try {
|
||||||
|
file.close();
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new CoreException(new DBStatus(e));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -430,4 +405,56 @@ public class Database {
|
||||||
public File getLocation() {
|
public File getLocation() {
|
||||||
return location;
|
return location;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Called from any thread via the cache, protected by {@link #fCache}.
|
||||||
|
*/
|
||||||
|
void releaseChunk(Chunk chunk) {
|
||||||
|
if (!chunk.fWritable)
|
||||||
|
chunks[chunk.fSequenceNumber]= null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the cache used for this database.
|
||||||
|
* @since 4.0
|
||||||
|
*/
|
||||||
|
public ChunkCache getChunkCache() {
|
||||||
|
return fCache;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setWritable() {
|
||||||
|
fWritable= true;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setReadOnly() throws CoreException {
|
||||||
|
if (fWritable) {
|
||||||
|
fWritable= false;
|
||||||
|
flushDirtyChunks();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void flushDirtyChunks() throws CoreException {
|
||||||
|
ArrayList dirtyChunks= new ArrayList();
|
||||||
|
synchronized (fCache) {
|
||||||
|
for (int i = 0; i < chunks.length; i++) {
|
||||||
|
Chunk chunk= chunks[i];
|
||||||
|
if (chunk != null && chunk.fWritable) {
|
||||||
|
chunk.fWritable= false;
|
||||||
|
if (chunk.fCacheIndex < 0) {
|
||||||
|
chunks[i]= null;
|
||||||
|
}
|
||||||
|
if (chunk.fDirty) {
|
||||||
|
dirtyChunks.add(chunk);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!dirtyChunks.isEmpty()) {
|
||||||
|
for (Iterator it = dirtyChunks.iterator(); it.hasNext();) {
|
||||||
|
Chunk chunk = (Chunk) it.next();
|
||||||
|
chunk.flush();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
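Taken together, setWritable(), setReadOnly() and flushDirtyChunks() suggest a write-transaction discipline: mark the database writable, apply the updates, then drop back to read-only, which flushes the dirty chunks. A hedged usage sketch against the API shown in this diff; the offset is a placeholder and the calling code is invented for illustration.

// Illustrative write pattern; assumes the Database class from this change is on
// the classpath and CoreException is org.eclipse.core.runtime.CoreException.
void updateRecord(Database db) throws CoreException {
	db.setWritable();
	try {
		int recordOffset = 4096;           // placeholder offset, not a real record
		db.putInt(recordOffset, 42);
	} finally {
		db.setReadOnly();                  // flushes dirty chunks back to the file
	}
}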
|
|
|
@ -12,6 +12,7 @@ WritablePDOM_error_unknownLinkage=AST specifies unknown linkage ''{0}''
|
||||||
PDOMManager_notifyJob_label=Notify Index Change Listeners
|
PDOMManager_notifyJob_label=Notify Index Change Listeners
|
||||||
PDOMManager_JoinIndexerTask=Join Indexer
|
PDOMManager_JoinIndexerTask=Join Indexer
|
||||||
PDOMManager_StartJob_name=Initialize Indexing
|
PDOMManager_StartJob_name=Initialize Indexing
|
||||||
|
PDOMManager_ClosePDOMJob=Close database
|
||||||
PDOMManager_notifyTask_message=Notify Listeners
|
PDOMManager_notifyTask_message=Notify Listeners
|
||||||
PDOMManager_indexMonitorDetail={0}/{1} sources, {2} headers
|
PDOMManager_indexMonitorDetail={0}/{1} sources, {2} headers
|
||||||
PDOMManager_ExistingFileCollides=A pdom already exists at location {0}
|
PDOMManager_ExistingFileCollides=A pdom already exists at location {0}
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
/*******************************************************************************
|
/*******************************************************************************
|
||||||
* Copyright (c) 2000, 2005 QNX Software Systems and others.
|
* Copyright (c) 2000, 2007 QNX Software Systems and others.
|
||||||
* All rights reserved. This program and the accompanying materials
|
* All rights reserved. This program and the accompanying materials
|
||||||
* are made available under the terms of the Eclipse Public License v1.0
|
* are made available under the terms of the Eclipse Public License v1.0
|
||||||
* which accompanies this distribution, and is available at
|
* which accompanies this distribution, and is available at
|
||||||
|
@ -7,6 +7,7 @@
|
||||||
*
|
*
|
||||||
* Contributors:
|
* Contributors:
|
||||||
* QNX Software Systems - Initial API and implementation
|
* QNX Software Systems - Initial API and implementation
|
||||||
|
* Markus Schorn (Wind River Systems)
|
||||||
*******************************************************************************/
|
*******************************************************************************/
|
||||||
package org.eclipse.cdt.core;
|
package org.eclipse.cdt.core;
|
||||||
|
|
||||||
|
@ -44,5 +45,24 @@ public class CCorePreferenceConstants {
|
||||||
* Default code formatter
|
* Default code formatter
|
||||||
*/
|
*/
|
||||||
public static final String DEFAULT_CODE_FORMATTER = CCorePlugin.PLUGIN_ID + ".defaultCodeFormatter"; //$NON-NLS-1$
|
public static final String DEFAULT_CODE_FORMATTER = CCorePlugin.PLUGIN_ID + ".defaultCodeFormatter"; //$NON-NLS-1$
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Cache size for the index as a percentage of max memory.
|
||||||
|
*/
|
||||||
|
public static final String INDEX_DB_CACHE_SIZE_PCT = CCorePlugin.PLUGIN_ID + ".indexDBCacheSizePct"; //$NON-NLS-1$
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Default cache size of the index-db as a percentage of max memory.
|
||||||
|
*/
|
||||||
|
public static final String DEFAULT_INDEX_DB_CACHE_SIZE_PCT = "10"; //$NON-NLS-1$
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Absolute maximum size of the index-db in megabytes.
|
||||||
|
*/
|
||||||
|
public static final String MAX_INDEX_DB_CACHE_SIZE_MB = CCorePlugin.PLUGIN_ID + ".maxIndexDBCacheSizeMB"; //$NON-NLS-1$
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Default absolute maximum size of the index-db in megabytes.
|
||||||
|
*/
|
||||||
|
public static final String DEFAULT_MAX_INDEX_DB_CACHE_SIZE_MB = "64"; //$NON-NLS-1$
|
||||||
}
|
}
|
||||||
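The two new preference pairs describe the index-database cache budget as a percentage of the JVM's maximum heap, capped by an absolute limit in megabytes. How the plug-in actually combines them is not shown in this hunk; a plausible computation, purely as a sketch, would be:

// Hypothetical computation of the chunk-cache budget from the two preferences.
final class CacheBudget {
	// pct is the INDEX_DB_CACHE_SIZE_PCT value, maxMB the MAX_INDEX_DB_CACHE_SIZE_MB value.
	static long computeCacheBytes(int pct, int maxMB) {
		long byPercent = Runtime.getRuntime().maxMemory() / 100L * pct;
		long absoluteCap = (long) maxMB * 1024L * 1024L;
		return Math.min(byPercent, absoluteCap);
	}

	public static void main(String[] args) {
		// With the defaults from this change (10%, 64 MB) on a 256 MB heap,
		// the budget works out to roughly 25 MB.
		System.out.println(computeCacheBytes(10, 64));
	}
}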
|
|
|
@ -42,8 +42,9 @@ public class CCorePreferenceInitializer extends AbstractPreferenceInitializer {
|
||||||
defaultOptionsMap.put(CCorePreferenceConstants.TRANSLATION_TASK_TAGS, CCorePreferenceConstants.DEFAULT_TASK_TAG);
|
defaultOptionsMap.put(CCorePreferenceConstants.TRANSLATION_TASK_TAGS, CCorePreferenceConstants.DEFAULT_TASK_TAG);
|
||||||
defaultOptionsMap.put(CCorePreferenceConstants.TRANSLATION_TASK_PRIORITIES, CCorePreferenceConstants.DEFAULT_TASK_PRIORITY);
|
defaultOptionsMap.put(CCorePreferenceConstants.TRANSLATION_TASK_PRIORITIES, CCorePreferenceConstants.DEFAULT_TASK_PRIORITY);
|
||||||
defaultOptionsMap.put(CCorePreferenceConstants.CODE_FORMATTER, CCorePreferenceConstants.DEFAULT_CODE_FORMATTER);
|
defaultOptionsMap.put(CCorePreferenceConstants.CODE_FORMATTER, CCorePreferenceConstants.DEFAULT_CODE_FORMATTER);
|
||||||
|
defaultOptionsMap.put(CCorePreferenceConstants.INDEX_DB_CACHE_SIZE_PCT, CCorePreferenceConstants.DEFAULT_INDEX_DB_CACHE_SIZE_PCT);
|
||||||
|
defaultOptionsMap.put(CCorePreferenceConstants.MAX_INDEX_DB_CACHE_SIZE_MB, CCorePreferenceConstants.DEFAULT_MAX_INDEX_DB_CACHE_SIZE_MB);
|
||||||
|
|
||||||
// Store default values to default preferences
|
// Store default values to default preferences
|
||||||
IEclipsePreferences defaultPreferences = ((IScopeContext) new DefaultScope()).getNode(CCorePlugin.PLUGIN_ID);
|
IEclipsePreferences defaultPreferences = ((IScopeContext) new DefaultScope()).getNode(CCorePlugin.PLUGIN_ID);
|
||||||
for (Iterator iter = defaultOptionsMap.entrySet().iterator(); iter.hasNext();) {
|
for (Iterator iter = defaultOptionsMap.entrySet().iterator(); iter.hasNext();) {
|
||||||
|
|