);
}
// Tail of the DeleteRequest handling branch (the branch opens before this chunk).
LOGGER.finer( "Transaction Delete:"+element );
try {
// Capture the bounds of the features about to be removed so the
// validation "dirty region" envelope can be expanded after the delete.
Filter filter = delete.getFilter();
Envelope damaged = store.getBounds(new DefaultQuery(
delete.getTypeName(), filter));
// getBounds may return null when the store cannot compute bounds
// cheaply; fall back to materializing the matching features.
if (damaged == null) {
damaged = store.getFeatures(filter).getBounds();
}
// Release-action SOME against a locking store: locks on the deleted
// features must be released individually as part of this delete.
if ((request.getLockId() != null)
&& store instanceof FeatureLocking
&& (request.getReleaseAction() == TransactionRequest.SOME)) {
FeatureLocking locking = (FeatureLocking) store;
// TODO: Revisit Lock/Delete interaction in gt2
// NOTE(review): `if (false)` keeps the first strategy as dead code
// on purpose; only the else branch below ever executes.
if (false) {
// REVISIT: This is bad - by releasing locks before
// we remove features we open ourselves up to the danger
// of someone else locking the features we are about to
// remove.
//
// We cannot do it the other way round, as the Features will
// not exist
//
// We cannot grab the fids offline using AUTO_COMMIT
// because we may have removed some of them earlier in the
// transaction
//
locking.unLockFeatures(filter);
store.removeFeatures(filter);
} else {
// This a bit better and what should be done, we will
// need to rework the gt2 locking api to work with
// fids or something
//
// The only other thing that would work would be
// to specify that FeatureLocking is required to
// remove locks when removing Features.
//
// While that sounds like a good idea, it would be
// extra work when doing release mode ALL.
//
DataStore data = store.getDataStore();
FilterFactory factory = FilterFactory
.createFilterFactory();
FeatureWriter writer;
writer = data.getFeatureWriter(typeName, filter,
transaction);
// Walk the matching features one at a time: unlock each by its
// fid, then remove it through the writer.
try {
while (writer.hasNext()) {
String fid = writer.next().getID();
locking.unLockFeatures(factory
.createFidFilter(fid));
writer.remove();
}
} finally {
// Always release the writer, even if unlock/remove throws.
writer.close();
}
// NOTE(review): the writer loop above already removed every
// matching feature, so this call looks redundant (the filter
// should match nothing at this point) — confirm before removing.
store.removeFeatures(filter);
}
} else {
// We don't have to worry about locking right now
//
store.removeFeatures(filter);
}
// Grow the validation envelope by the area the delete touched.
envelope.expandToInclude(damaged);
} catch (IOException ioException) {
// Surface datastore failures as a WFS transaction fault, tagged with
// the element and request handles for the client response.
// NOTE(review): only the message is propagated here; the cause is
// dropped, unlike the Insert branch which passes the exception itself.
throw new WfsTransactionException(ioException.getMessage(),
element.getHandle(), request.getHandle());
}
}
// Handle an <Insert> element: check the service level allows inserts,
// validate the incoming features, add them to the FeatureStore, and
// record the generated feature ids in the transaction result.
if (element instanceof InsertRequest) {
    if ((request.getWFS().getServiceLevel() & WFSDTO.SERVICE_INSERT) == 0) {
        // could we catch this during the handler, rather than during execution?
        throw new ServiceException(
            "Transaction INSERT support is not enabled");
    }
    // FIX: log message previously read "Transasction Insert" (typo).
    LOGGER.finer( "Transaction Insert:"+element );
    try {
        InsertRequest insert = (InsertRequest) element;
        FeatureCollection collection = insert.getFeatures();
        FeatureReader reader = DataUtilities.reader(collection);
        FeatureType schema = store.getSchema();
        // Need to use the namespace here for the lookup, due to our weird
        // prefixed internal typenames. see
        // http://jira.codehaus.org/secure/ViewIssue.jspa?key=GEOS-143
        // Once we get our datastores making features with the correct namespaces
        // we can do something like this:
        // FeatureTypeInfo typeInfo = catalog.getFeatureTypeInfo(schema.getTypeName(), schema.getNamespace());
        // until then (when geos-144 is resolved) we're stuck with:
        FeatureTypeInfo typeInfo = catalog.getFeatureTypeInfo(element.getTypeName() );
        // this is possible with the insert hack above.
        LOGGER.finer("Use featureValidation to check contents of insert" );
        featureValidation( typeInfo.getDataStoreInfo().getId(), schema, collection );
        // addFeatures returns the ids assigned to the new features; report
        // them back to the client via the result builder.
        Set fids = store.addFeatures(reader);
        build.addInsertResult(element.getHandle(), fids);
        //
        // Add to validation check envelope
        envelope.expandToInclude(collection.getBounds());
    } catch (IOException ioException) {
        // Preserve the cause so the stack trace survives into the fault.
        throw new WfsTransactionException(ioException,
            element.getHandle(), request.getHandle());
    }
}
// Handle an <Update> element: check the service level allows updates,
// then build the query for the features to modify. (The branch body
// continues past the end of this chunk.)
if (element instanceof UpdateRequest) {
if ((request.getWFS().getServiceLevel() & WFSDTO.SERVICE_UPDATE) == 0) {
// could we catch this during the handler, rather than during execution?
throw new ServiceException(
"Transaction Update support is not enabled");
}
LOGGER.finer( "Transaction Update:"+element);
try {
UpdateRequest update = (UpdateRequest) element;
// Resolve the attributes/values to write from the request against
// the store's schema.
Filter filter = update.getFilter();
AttributeType[] types = update.getTypes(store.getSchema());
Object[] values = update.getValues();
DefaultQuery query = new DefaultQuery(update.getTypeName(),