}
// Record which rdfizer implementation produced this graph (provenance for the cache memo).
memo.setRdfizer(rfiz.getClass().getName());
// Sink that writes parsed statements into the backing store through this SailConnection.
RDFSink adder = dataStore.createInputSink(sc);
// Buffer statements so nothing reaches the store until the explicit flush() below —
// this lets a failed rdfize be discarded without touching the triple store.
RDFBuffer buffer = new RDFBuffer(adder);
// Note: any context information in the source document is discarded.
// Every statement is re-contextualized into the single named graph identified by graphUri.
RDFSink pipe = new SingleContextPipe(buffer, valueFactory.createURI(graphUri), valueFactory);
// When blank nodes are disallowed, rewrite them to URIs before they enter the pipe.
RDFHandler handler = new SesameInputAdapter(useBlankNodes
? pipe
: new BNodeToURIFilter(pipe, valueFactory));
InputStream is;
try {
is = rep.getStream();
} catch (IOException e) {
throw new RippleException(e);
}
// NOTE(review): 'is' is not visibly closed in this span — presumably rfiz.rdfize()
// closes it; confirm, otherwise this leaks the stream on the failure path.
// Use the namespace portion of the original URI as the base URI for the retrieved RDF document.
String baseUri = uri.getNamespace();
// Parse the stream; the resulting status is memoized so later lookups can skip re-fetching.
memo.setStatus(rfiz.rdfize(is, handler, baseUri));
// Only update the graph in the triple store if the operation was successful.
if (CacheEntry.Status.Success == memo.getStatus()) {
try {
// Drop any previously cached statements for this graph before re-adding the fresh ones.
sc.removeStatements(null, null, null, valueFactory.createURI(graphUri));
} catch (SailException e) {
throw new RippleException(e);
}
// Release the buffered statements into the store (the remove above succeeded).
buffer.flush();
}
} finally {
// Always record the memo, success or failure — NOTE(review): presumably so failed
// dereferences are remembered and not retried on every access; confirm against callers.
metadata.setMemo(graphUri, memo, sc);
// an autocommit happens independently of a call to LinkedDataSail#commit