+ "unexpected type: " + rOut.getType());
}
// See Result.newDataHeadResult() for what we have here
// .metaData, .navigator
RowSetNavigator navigator = rOut.getNavigator();
ResultMetaData md = rOut.metaData;
if (md == null) {
throw new RecoverableOdbcFailure(
"Failed to get metadata for query results");
}
int columnCount = md.getColumnCount();
String[] colLabels = md.getGeneratedColumnNames();
colTypes = md.columnTypes;
pgTypes = new PgType[columnCount];
for (int i = 0; i < pgTypes.length; i++) {
pgTypes[i] = PgType.getPgType(colTypes[i],
md.isTableColumn(i));
}
// fredt : colLabels may not contain some column names
// colDefs is used when no label is present:
// SELECT TABLECOL AS COLLABEL has both name and label
// SELECT TABLECOL has name 'TABLECOL'
// SELECT 2 AS CONST has label 'CONST'
ColumnBase[] colDefs = md.columns;
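// RowDescription ('T') body, per the PostgreSQL v3 protocol:
// int16 field count, then per field: name (cstring), table OID
// (int32), column attribute number (int16), data type OID (int32),
// data type size (int16), type modifier (int32), format code (int16).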
// Num cols.
outPacket.writeShort(columnCount);
for (int i = 0; i < columnCount; i++) {
// col name
if (colLabels[i] != null) {
outPacket.write(colLabels[i]);
} else {
outPacket.write(colDefs[i].getNameString());
}
// table ID [relid]:
outPacket.writeInt(OdbcUtil.getTableOidForColumn(i,
md));
// column id [attid]
outPacket.writeShort(OdbcUtil.getIdForColumn(i,
md));
outPacket.writeInt(pgTypes[i].getOid());
// Datatype size [adtsize]
outPacket.writeShort(pgTypes[i].getTypeWidth());
// Var size [atttypmod]
// This is the size constraint integer
// like VARCHAR(12) or DECIMAL(4).
// -1 if none specified for this column.
outPacket.writeInt(pgTypes[i].getLPConstraint());
// format code, 0 = text column, 1 = binary column,
// but entirely ignored by our driver.
// Would only be non-0 if a 'B' command requested it.
outPacket.writeShort(0);
}
outPacket.xmit('T', dataOutput); // Xmit Row Definition
int rowNum = 0;
while (navigator.next()) {
rowNum++;
Object[] rowData = navigator.getCurrent();
// Row.getData(). Don't know why *Data.getCurrent()
// method returns Object instead of Object[].
// TODO: Remove the assertion here:
if (rowData == null) {
throw new RecoverableOdbcFailure("Null row?");
}
if (rowData.length < columnCount) {
throw new RecoverableOdbcFailure(
"Data element mismatch. " + columnCount
+ " metadata cols, yet " + rowData.length
+ " data elements for row " + rowNum);
}
//server.printWithThread("Row " + rowNum + " has "
//+ rowData.length + " elements");
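// DataRow ('D') body: int16 column count, then per column an int32
// value length (-1 for SQL NULL) followed by that many bytes.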
outPacket.writeShort(columnCount);
// This field is just swallowed by PG ODBC
// client, but validated by psql.
for (int i = 0; i < columnCount; i++) {
if (rowData[i] == null) {
/*
server.printWithThread("R" + rowNum + "C"
+ (i+1) + " => [null]");
*/
outPacket.writeInt(-1);
} else {
dataString =
pgTypes[i].valueString(rowData[i]);
outPacket.writeSized(dataString);
if (server.isTrace()) {
server.printWithThread(
"R" + rowNum + "C" + (i + 1)
+ " => ("
+ rowData[i].getClass().getName()
+ ") [" + dataString + ']');
}
}
}
outPacket.xmit('D', dataOutput);
}
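// CommandComplete ('C') with the command tag for this statement.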
outPacket.write("SELECT");
outPacket.xmit('C', dataOutput);
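// The ReadyForQuery ('Z') packet itself goes out after the main
// comm switch whenever this flag is set.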
sendReadyForQuery = true;
break;
}
if (normalized.startsWith("deallocate \"")
&& normalized.charAt(normalized.length() - 1)
== '"') {
tmpStr = sql.trim().substring(
"deallocate \"".length()).trim();
// Must use "sql" directly since name is case-sensitive
handle = tmpStr.substring(0, tmpStr.length() - 1);
odbcPs = (OdbcPreparedStatement) sessionOdbcPsMap.get(
handle);
if (odbcPs != null) {
odbcPs.close();
}
portal =
(StatementPortal) sessionOdbcPortalMap.get(handle);
if (portal != null) {
portal.close();
}
if (odbcPs == null && portal == null) {
/*
throw new RecoverableOdbcFailure(null,
"No object present for handle: " + handle, "08P01");
Driver does not handle state change correctly, so
for now we just issue a warning:
OdbcUtil.alertClient(OdbcUtil.ODBC_SEVERITY_ERROR,
"No object present for handle: " + handle,
dataOutput);
TODO: Retest this. May have been side-effect of
other problems.
*/
server.printWithThread(
"Ignoring bad 'DEALLOCATE' cmd");
}
if (server.isTrace()) {
server.printWithThread("Deallocated PS/Portal '"
+ handle + "'");
}
outPacket.write("DEALLOCATE");
outPacket.xmit('C', dataOutput);
sendReadyForQuery = true;
break;
}
if (normalized.startsWith("set client_encoding to ")) {
server.printWithThread("Stubbing EXECDIR for: " + sql);
outPacket.write("SET");
outPacket.xmit('C', dataOutput);
sendReadyForQuery = true;
break;
}
// Case below is non-String-matched Qs:
server.printWithThread("Performing a real EXECDIRECT...");
odbcExecDirect(sql);
sendReadyForQuery = true;
break;
case 'X' : // Terminate packet
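// Warn about leaked handles, ignoring the unnamed ("") statement
// and portal entries (hence the containsKey("") adjustments below).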
if (sessionOdbcPsMap.size()
> (sessionOdbcPsMap.containsKey("") ? 1
: 0)) {
server.printWithThread("Client left "
+ sessionOdbcPsMap.size()
+ " PS objects open");
}
if (sessionOdbcPortalMap.size()
> (sessionOdbcPortalMap.containsKey("") ? 1
: 0)) {
server.printWithThread("Client left "
+ sessionOdbcPortalMap.size()
+ " Portal objects open");
}
OdbcUtil.validateInputPacketSize(inPacket);
throw cleanExit;
case 'H' : // Flush packet
// No-op. It is impossible to cache while supporting multiple
// ps and portal objects, so there is nothing for a Flush to
// do. There isn't even a reply to a Flush packet.
break;
case 'S' : // Sync packet
// Special case for Sync packets.
// To facilitate recovery, we do not abort in case of problems.
if (session.isAutoCommit()) {
try {
// I don't see how this can be useful. If we ran DML, it
// will have autocommitted. If we have just switched to
// autoCommit mode, then according to spec we must have
// executed an implicit commit then.
server.printWithThread(
"Silly implicit commit by Sync");
session.commit(true);
// TODO: Find out if chain param should be T or F.
} catch (HsqlException he) {
server.printWithThread("Implicit commit failed: "
+ he);
OdbcUtil.alertClient(OdbcUtil.ODBC_SEVERITY_ERROR,
"Implicit commit failed",
he.getSQLState(), dataOutput);
}
}
sendReadyForQuery = true;
break;
case 'P' : // Parse packet
psHandle = inPacket.readString();
String query = OdbcUtil.revertMungledPreparedQuery(
inPacket.readString());
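// Count of parameter-type OIDs pre-specified by the client;
// each follows as an int32 (0 means unspecified).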
paramCount = inPacket.readUnsignedShort();
for (int i = 0; i < paramCount; i++) {
if (inPacket.readInt() != 0) {
throw new RecoverableOdbcFailure(
null,
"Parameter-type OID specifiers not supported yet",
"0A000");
}
}
if (server.isTrace()) {
server.printWithThread(
"Received Prepare request for query (" + query
+ ") with handle '" + psHandle + "'");
}
if (psHandle.length() > 0
&& sessionOdbcPsMap.containsKey(psHandle)) {
throw new RecoverableOdbcFailure(
null,
"PS handle '" + psHandle + "' already in use. "
+ "You must close it before recreating", "08P01");
}
new OdbcPreparedStatement(psHandle, query,
sessionOdbcPsMap, session);
outPacket.xmit('1', dataOutput);
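// ParseComplete packet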
break;
case 'D' : // Describe packet
c = inPacket.readByteChar();
handle = inPacket.readString();
odbcPs = null;
portal = null;
if (c == 'S') {
odbcPs = (OdbcPreparedStatement) sessionOdbcPsMap.get(
handle);
} else if (c == 'P') {
portal =
(StatementPortal) sessionOdbcPortalMap.get(handle);
} else {
throw new RecoverableOdbcFailure(
null,
"Description packet request type invalid: " + c,
"08P01");
}
if (server.isTrace()) {
server.printWithThread("Received Describe request for "
+ c + " of handle '" + handle
+ "'");
}
if (odbcPs == null && portal == null) {
throw new RecoverableOdbcFailure(
null,
"No object present for " + c + " handle: "
+ handle, "08P01");
}
Result ackResult = (odbcPs == null) ? portal.ackResult
: odbcPs.ackResult;
pmd = ackResult.parameterMetaData;
paramCount = pmd.getColumnCount();
Type[] paramTypes = pmd.getParameterTypes();
if (paramCount != paramTypes.length) {
throw new RecoverableOdbcFailure(
"Parameter count mismatch. Count of "
+ paramCount + " reported, but there are "
+ paramTypes.length + " param md objects");
}
if (c == 'S') {
outPacket.writeShort(paramCount);
for (int i = 0; i < paramTypes.length; i++) {
outPacket.writeInt(
PgType.getPgType(
paramTypes[i], true).getOid());
// TODO: Determine whether parameter typing works
// better for Strings when trying to match table column
// or not. 2nd param to getPgType().
}
outPacket.xmit('t', dataOutput);
// ParameterDescription packet
}
ResultMetaData md = ackResult.metaData;
if (md.getColumnCount() < 1) {
if (server.isTrace()) {
server.printWithThread(
"Non-rowset query so returning NoData packet");
}
// Send NoData packet because no columnar output from
// this statement.
outPacket.xmit('n', dataOutput);
break;
}
// TODO:
// May need to pass the extra BIGINT pseudo-column for
// updatable-row or other purposes. In that case, it may
// make sense to use getExtendedColumnCount(), etc.
String[] colNames = md.getGeneratedColumnNames();
if (md.getColumnCount() != colNames.length) {
throw new RecoverableOdbcFailure(
"Couldn't get all column names: "
+ md.getColumnCount() + " cols. but only got "
+ colNames.length + " col. names");
}
colTypes = md.columnTypes;
pgTypes = new PgType[colNames.length];
ColumnBase[] colDefs = md.columns;
for (int i = 0; i < pgTypes.length; i++) {
pgTypes[i] = PgType.getPgType(colTypes[i],
md.isTableColumn(i));
}
if (colNames.length != colDefs.length) {
throw new RecoverableOdbcFailure(
"Col data mismatch. " + colDefs.length
+ " col instances but " + colNames.length
+ " col names");
}
outPacket.writeShort(colNames.length); // Num cols.
for (int i = 0; i < colNames.length; i++) {
outPacket.write(colNames[i]); // Col. name
// table ID [relid]:
outPacket.writeInt(OdbcUtil.getTableOidForColumn(i,
md));
// column id [attid]
outPacket.writeShort(OdbcUtil.getIdForColumn(i, md));
outPacket.writeInt(pgTypes[i].getOid());
// Datatype size [adtsize]
outPacket.writeShort(pgTypes[i].getTypeWidth());
// Var size [atttypmod]
// This is the size constraint integer
// like VARCHAR(12) or DECIMAL(4).
// -1 if none specified for this column.
outPacket.writeInt(pgTypes[i].getLPConstraint());
// format code, 0 = text column, 1 = binary column,
// but entirely ignored by our driver.
// Would only be non-0 if a 'B' command requested it.
outPacket.writeShort(0);
}
outPacket.xmit('T', dataOutput); // Xmit Row Definition
break;
case 'B' : // Bind packet
portalHandle = inPacket.readString();
psHandle = inPacket.readString();
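// Remainder of the Bind ('B') body: int16 count of parameter format
// codes plus the codes (0 = text, 1 = binary), int16 parameter count
// followed by the sized parameter values, then int16 count of
// result-column format codes plus those codes.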
int paramFormatCount = inPacket.readUnsignedShort();
boolean[] paramBinary = new boolean[paramFormatCount];
for (int i = 0; i < paramFormatCount; i++) {
paramBinary[i] = inPacket.readUnsignedShort() != 0;
if (server.isTrace() && paramBinary[i]) {
server.printWithThread("Binary param #" + i);
}
}
paramCount = inPacket.readUnsignedShort();
Object[] paramVals = new Object[paramCount];
for (int i = 0; i < paramVals.length; i++) {
if (i < paramBinary.length && paramBinary[i]) {
paramVals[i] = inPacket.readSizedBinaryData();
} else {
paramVals[i] = inPacket.readSizedString();
}
}
int outFormatCount = inPacket.readUnsignedShort();
for (int i = 0; i < outFormatCount; i++) {
if (inPacket.readUnsignedShort() != 0) {
throw new RecoverableOdbcFailure(
null, "Binary output values not supported",
"0A000");
}
}
if (server.isTrace()) {
server.printWithThread(
"Received Bind request to make Portal from ("
+ psHandle + ")' with handle '" + portalHandle
+ "'");
}
odbcPs =
(OdbcPreparedStatement) sessionOdbcPsMap.get(psHandle);
if (odbcPs == null) {
throw new RecoverableOdbcFailure(
null,
"No object present for PS handle: " + psHandle,
"08P01");
}
if (portalHandle.length() > 0
&& sessionOdbcPortalMap.containsKey(
portalHandle)) {
throw new RecoverableOdbcFailure(
null,
"Portal handle '" + portalHandle
+ "' already in use. "
+ "You must close it before recreating", "08P01");
}
pmd = odbcPs.ackResult.parameterMetaData;
if (paramCount != pmd.getColumnCount()) {
throw new RecoverableOdbcFailure(
null,
"Client didn't specify all "
+ pmd.getColumnCount() + " parameters ("
+ paramCount + ')', "08P01");
}
new StatementPortal(portalHandle, odbcPs, paramVals,
sessionOdbcPortalMap);
outPacket.xmit('2', dataOutput);
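// BindComplete packet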
break;
case 'E' : // Execute packet
portalHandle = inPacket.readString();
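// Maximum number of rows to return for this Execute; 0 means no limit.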
int fetchRows = inPacket.readInt();
if (server.isTrace()) {
server.printWithThread("Received Exec request for "
+ fetchRows
+ " rows from portal handle '"
+ portalHandle + "'");
}
portal = (StatementPortal) sessionOdbcPortalMap.get(
portalHandle);
if (portal == null) {
throw new RecoverableOdbcFailure(
null,
"No object present for Portal handle: "
+ portalHandle, "08P01");
}
// these result properties mean read-only, not holdable
portal.bindResult.setPreparedExecuteProperties(
portal.parameters, fetchRows, 0, 0);
// 0 for maxRows means unlimited. Same for fetchRows.
rOut = session.execute(portal.bindResult);
switch (rOut.getType()) {
case ResultConstants.UPDATECOUNT :
outPacket.write(
OdbcUtil.echoBackReplyString(
portal.lcQuery, rOut.getUpdateCount()));
outPacket.xmit('C', dataOutput);
// end of rows (B or D packets)
// This keeps session.autoUpdate in sync with client's
// notion of transaction state.
if (portal.lcQuery.equals("commit")
|| portal.lcQuery.startsWith("commit ")
|| portal.lcQuery.equals("rollback")
|| portal.lcQuery.startsWith(
"rollback ")) {
try {
session.setAutoCommit(true);
} catch (HsqlException he) {
throw new RecoverableOdbcFailure(
"Failed to change transaction state: "
+ he.getMessage(), he.getSQLState());
}
}
break MAIN_ODBC_COMM_SWITCH;
case ResultConstants.DATA :
break;
case ResultConstants.ERROR :
throw new RecoverableOdbcFailure(rOut);
default :
throw new RecoverableOdbcFailure(
"Output Result from Portal execution is of "
+ "unexpected type: " + rOut.getType());
}
// See Result.newDataHeadResult() for what we have here
// .metaData, .navigator
RowSetNavigator navigator = rOut.getNavigator();
int rowNum = 0;
int colCount = portal.ackResult.metaData.getColumnCount();
colTypes = portal.ackResult.metaData.columnTypes;
pgTypes = new PgType[colCount];
for (int i = 0; i < pgTypes.length; i++) {
pgTypes[i] = PgType.getPgType(
colTypes[i],
portal.ackResult.metaData.isTableColumn(i));
}
while (navigator.next()) {
rowNum++;
Object[] rowData = navigator.getCurrent();
if (rowData == null) {
throw new RecoverableOdbcFailure("Null row?");
}
if (rowData.length < colCount) {
throw new RecoverableOdbcFailure(
"Data element mismatch. " + colCount
+ " metadata cols, yet " + rowData.length
+ " data elements for row " + rowNum);
}
//server.printWithThread("Row " + rowNum + " has "
//+ rowData.length + " elements");
outPacket.writeShort(colCount);
// This field is just swallowed by PG ODBC
// client, but validated by psql.
for (int i = 0; i < colCount; i++) {
if (rowData[i] == null) {
/*
server.printWithThread("R" + rowNum + "C"
+ (i+1) + " => [null]");
*/
outPacket.writeInt(-1);
} else {
dataString =
pgTypes[i].valueString(rowData[i]);
outPacket.writeSized(dataString);
if (server.isTrace()) {
server.printWithThread(
"R" + rowNum + "C" + (i + 1) + " => ("
+ rowData[i].getClass().getName()
+ ") [" + dataString + ']');
}
}
}
outPacket.xmit('D', dataOutput);
}
if (navigator.afterLast()) {
outPacket.write("SELECT");
outPacket.xmit('C', dataOutput);
// end of rows (B or D packets)
} else {