// Test with PredicatePushdownOptimizer disabled: the filter runs after the full
// scan, so the loader cannot skip any data. PIG_OPTIMIZER_RULES_KEY carries a
// serialized Set<String> of optimizer rule names to skip.
disabledOptimizerRules.add("PredicatePushdownOptimizer");
pigServer_disabledRule.getPigContext().getProperties().setProperty(PigImplConstants.PIG_OPTIMIZER_RULES_KEY,
        ObjectSerializer.serialize(disabledOptimizerRules));
pigServer_disabledRule.registerQuery("B = load '" + inputFile + "' using OrcStorage();");
pigServer_disabledRule.registerQuery("C = filter B by " + filterStmt + ";");
ExecJob job = pigServer_disabledRule.store("C", OUTPUT3);
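// store() triggers execution; the returned ExecJob exposes the run's statistics.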
// The plan runs as a single job; its source vertex in the job graph holds the stats.
JobStats stats = (JobStats) job.getStatistics().getJobGraph().getSources().get(0);
// getRecordWrittern() is the (misspelled) accessor Pig's JobStats actually exposes.
assertEquals(expectedRows, stats.getRecordWrittern());
long bytesWithoutPushdown = stats.getHdfsBytesRead();
// Test with PredicatePushdownOptimizer enabled: the filter is pushed into
// OrcStorage, so only 2 blocks should be read.
pigServer.registerQuery("D = load '" + inputFile + "' using OrcStorage();");
pigServer.registerQuery("E = filter D by " + filterStmt + ";");
job = pigServer.store("E", OUTPUT4);
stats = (JobStats) job.getStatistics().getJobGraph().getSources().get(0);
assertEquals(expectedRows, stats.getRecordWrittern());
long bytesWithPushdown = stats.getHdfsBytesRead();
System.out.println("bytesWithoutPushdown was " + bytesWithoutPushdown
        + " and bytesWithPushdown was " + bytesWithPushdown);