mirror of https://github.com/eclipse-cdt/cdt (synced 2025-04-23 14:42:11 +02:00)
Cosmetics.
This commit is contained in:
parent 91449f9094
commit 4f40d2e9ca
1 changed file with 14 additions and 15 deletions
@@ -73,10 +73,10 @@ public class Database {
 public static final int BLOCK_SIZE_DELTA_BITS = 3;
 public static final int BLOCK_SIZE_DELTA= 1 << BLOCK_SIZE_DELTA_BITS;
 public static final int MIN_BLOCK_DELTAS = 2; // a block must at least be 2 + 2*4 bytes to link the free blocks.
-public static final int MAX_BLOCK_DELTAS = CHUNK_SIZE/BLOCK_SIZE_DELTA;
-public static final int MAX_MALLOC_SIZE = MAX_BLOCK_DELTAS*BLOCK_SIZE_DELTA - BLOCK_HEADER_SIZE;
+public static final int MAX_BLOCK_DELTAS = CHUNK_SIZE / BLOCK_SIZE_DELTA;
+public static final int MAX_MALLOC_SIZE = MAX_BLOCK_DELTAS * BLOCK_SIZE_DELTA - BLOCK_HEADER_SIZE;
 public static final int PTR_SIZE = 4; // size of a pointer in the database in bytes
-public static final int TYPE_SIZE = 2+PTR_SIZE; // size of a type in the database in bytes
+public static final int TYPE_SIZE = 2 + PTR_SIZE; // size of a type in the database in bytes
 public static final int VALUE_SIZE = TYPE_SIZE; // size of a value in the database in bytes
 public static final int ARGUMENT_SIZE = TYPE_SIZE; // size of a template argument in the database in bytes
 public static final long MAX_DB_SIZE= ((long) 1 << (Integer.SIZE + BLOCK_SIZE_DELTA_BITS));
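
Note on the hunk above: the constants form a short arithmetic chain in which blocks are allocated in multiples of BLOCK_SIZE_DELTA (1 << 3 = 8 bytes) and the largest single allocation is one chunk minus the block header. A minimal standalone sketch, assuming CHUNK_SIZE = 4096 and BLOCK_HEADER_SIZE = 2 (both are defined elsewhere in Database.java and are not part of this diff):

// Hypothetical sketch only; CHUNK_SIZE and BLOCK_HEADER_SIZE are assumed values.
public class BlockSizingSketch {
    static final int CHUNK_SIZE = 4096;       // assumption: 4 KiB chunks
    static final int BLOCK_HEADER_SIZE = 2;   // assumption: 2-byte block header

    static final int BLOCK_SIZE_DELTA_BITS = 3;
    static final int BLOCK_SIZE_DELTA = 1 << BLOCK_SIZE_DELTA_BITS;     // 8-byte allocation granularity
    static final int MAX_BLOCK_DELTAS = CHUNK_SIZE / BLOCK_SIZE_DELTA;  // 512 deltas per chunk
    static final int MAX_MALLOC_SIZE = MAX_BLOCK_DELTAS * BLOCK_SIZE_DELTA - BLOCK_HEADER_SIZE; // 4094

    public static void main(String[] args) {
        System.out.println("largest single allocation: " + MAX_MALLOC_SIZE + " bytes");
    }
}

Under those assumed values the sketch prints a maximum allocation of 4094 bytes.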
@@ -91,9 +91,9 @@ public class Database {
 private final File fLocation;
 private final boolean fReadOnly;
 private RandomAccessFile fFile;
-private boolean fExclusiveLock= false; // necessary for any write operation
+private boolean fExclusiveLock; // necessary for any write operation
 private boolean fLocked; // necessary for any operation.
-private boolean fIsMarkedIncomplete= false;
+private boolean fIsMarkedIncomplete;
 
 private int fVersion;
 private final Chunk fHeaderChunk;
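
The field changes above only drop redundant initializers: Java gives instance fields their default value (false for boolean) before any constructor body runs, so the explicit "= false" changes nothing. A tiny illustration, with names chosen to mirror the diff rather than the actual class:

// Illustrative only: boolean instance fields default to false without an explicit initializer.
class DefaultsSketch {
    private boolean fExclusiveLock;      // implicitly false
    private boolean fIsMarkedIncomplete; // implicitly false

    public static void main(String[] args) {
        DefaultsSketch d = new DefaultsSketch();
        System.out.println(d.fExclusiveLock + " " + d.fIsMarkedIncomplete); // prints: false false
    }
}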
@@ -151,7 +151,7 @@ public class Database {
 fFile.getChannel().read(buf, position);
 return;
 } catch (ClosedChannelException e) {
-// bug 219834 file may have be closed by interrupting a thread during an I/O operation.
+// Bug 219834 file may have be closed by interrupting a thread during an I/O operation.
 reopen(e, ++retries);
 }
 } while (true);
@@ -164,14 +164,14 @@ public class Database {
 fFile.getChannel().write(buf, position);
 return;
 } catch (ClosedChannelException e) {
-// bug 219834 file may have be closed by interrupting a thread during an I/O operation.
+// Bug 219834 file may have be closed by interrupting a thread during an I/O operation.
 reopen(e, ++retries);
 }
 }
 }
 
 private void reopen(ClosedChannelException e, int attempt) throws ClosedChannelException, FileNotFoundException {
-// only if the current thread was not interrupted we try to reopen the file.
+// Only if the current thread was not interrupted we try to reopen the file.
 if (e instanceof ClosedByInterruptException || attempt >= 20) {
 throw e;
 }
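
The comment edits above sit in the retry logic for bug 219834: interrupting a thread during channel I/O closes the underlying FileChannel, so reads and writes are retried against a reopened file unless the current thread itself was the one interrupted. A minimal standalone sketch of that pattern; class, field, and constructor details here are illustrative, not the actual Database implementation:

// Hypothetical sketch of retry-on-ClosedChannelException; not the real Database class.
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.ClosedChannelException;

class RetryingReader {
    private final File location;
    private RandomAccessFile file;

    RetryingReader(File location) throws FileNotFoundException {
        this.location = location;
        this.file = new RandomAccessFile(location, "r");
    }

    void read(ByteBuffer buf, long position) throws IOException {
        int retries = 0;
        do {
            try {
                file.getChannel().read(buf, position);
                return;
            } catch (ClosedChannelException e) {
                // Some thread was interrupted during I/O and the channel was closed;
                // reopen and retry a bounded number of times.
                reopen(e, ++retries);
            }
        } while (true);
    }

    private void reopen(ClosedChannelException e, int attempt) throws ClosedChannelException, FileNotFoundException {
        // ClosedByInterruptException means the current thread itself was interrupted:
        // give up in that case, and also after too many attempts.
        if (e instanceof ClosedByInterruptException || attempt >= 20) {
            throw e;
        }
        file = new RandomAccessFile(location, "r");
    }
}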
@@ -185,9 +185,9 @@ public class Database {
 long position = 0;
 long size = from.size();
 while (position < size) {
-nRead = from.transferTo(position, 4096*16, target);
+nRead = from.transferTo(position, 4096 * 16, target);
 if (nRead == 0) {
-break; // should not happen
+break; // Should not happen
 } else {
 position+= nRead;
 }
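
For context on the loop in this hunk: FileChannel.transferTo may transfer fewer bytes than requested, so the caller advances its own position by the returned count and bails out if no progress is made. A self-contained sketch under that reading (method and variable names are illustrative):

// Illustrative copy loop using FileChannel.transferTo in 64 KiB requests.
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

final class ChannelCopySketch {
    static void copy(Path source, WritableByteChannel target) throws IOException {
        try (FileChannel from = FileChannel.open(source, StandardOpenOption.READ)) {
            long position = 0;
            long size = from.size();
            while (position < size) {
                long nRead = from.transferTo(position, 4096 * 16, target);
                if (nRead == 0) {
                    break; // should not happen for a regular file
                }
                position += nRead; // advance by what was actually transferred
            }
        }
    }
}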
@@ -554,7 +554,7 @@ public class Database {
 if (useBytes) {
 bytelen= len;
 } else {
-bytelen= 2*len;
+bytelen= 2 * len;
 }
 
 if (bytelen > ShortString.MAX_BYTE_LENGTH) {
@@ -616,7 +616,7 @@ public class Database {
 // chunks have been removed from the cache, so we are fine
 fHeaderChunk.clear(0, CHUNK_SIZE);
 fHeaderChunk.fDirty= false;
-fChunks= new Chunk[] {null};
+fChunks= new Chunk[] { null };
 fChunksUsed = fChunksAllocated = fChunks.length;
 try {
 fFile.close();
@@ -695,8 +695,7 @@ public class Database {
 }
 // also handles header chunk
 flushAndUnlockChunks(dirtyChunks, flush);
-}
-finally {
+} finally {
 fExclusiveLock= false;
 }
 }
@@ -730,7 +729,7 @@ public class Database {
 
 private void flushAndUnlockChunks(final ArrayList<Chunk> dirtyChunks, boolean isComplete) throws CoreException {
 assert !Thread.holdsLock(fCache);
-synchronized(fHeaderChunk) {
+synchronized (fHeaderChunk) {
 final boolean haveDirtyChunks = !dirtyChunks.isEmpty();
 if (haveDirtyChunks || fHeaderChunk.fDirty) {
 markFileIncomplete();