diff --git a/phoenix-client-parent/pom.xml b/phoenix-client-parent/pom.xml
index 0b3f15b8dfd..c304bd87a8c 100644
--- a/phoenix-client-parent/pom.xml
+++ b/phoenix-client-parent/pom.xml
@@ -165,6 +165,9 @@
                    <exclude>org/apache/phoenix/**</exclude>
                    <exclude>org/apache/omid/**</exclude>
+
+                    <exclude>org/xerial/snappy/**</exclude>
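// Illustrative sketch, not part of the patch: snappy-java bundles JNI native libraries and
// loads them through its own class and resource names, which is the likely reason the
// org/xerial/snappy/** packages are excluded from relocation above. A hypothetical smoke
// test to run against the shaded client jar could look like this (the class name is made up).
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.xerial.snappy.Snappy;

public class ShadedSnappySmokeTest {
    public static void main(String[] args) throws IOException {
        byte[] input = "phoenix shaded client snappy check".getBytes(StandardCharsets.UTF_8);
        // Compressing forces snappy-java to load its bundled native library.
        byte[] compressed = Snappy.compress(input);
        byte[] roundTrip = Snappy.uncompress(compressed);
        if (!java.util.Arrays.equals(input, roundTrip)) {
            throw new AssertionError("snappy round trip failed in shaded jar");
        }
        System.out.println("snappy-java loaded and round-tripped " + input.length + " bytes");
    }
}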
diff --git a/phoenix-core-client/pom.xml b/phoenix-core-client/pom.xml
index c9112e850ba..a6cdd583586 100644
--- a/phoenix-core-client/pom.xml
+++ b/phoenix-core-client/pom.xml
@@ -359,8 +359,8 @@
       <artifactId>jsr305</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.iq80.snappy</groupId>
-      <artifactId>snappy</artifactId>
+      <groupId>org.xerial.snappy</groupId>
+      <artifactId>snappy-java</artifactId>
     </dependency>
     <dependency>
       <groupId>com.fasterxml.jackson.core</groupId>
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountClientAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountClientAggregator.java
index cea6d49c636..bc907eb37e2 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountClientAggregator.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountClientAggregator.java
@@ -33,10 +33,10 @@
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PVarbinary;
+import org.xerial.snappy.Snappy;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
import org.apache.phoenix.schema.tuple.Tuple;
-import org.iq80.snappy.Snappy;
/**
* Client side Aggregator which will aggregate data and find distinct values with number of occurrences for each.
@@ -66,7 +66,7 @@ public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) {
if (Bytes.equals(ptr.get(), ptr.getOffset(), 1, DistinctValueWithCountServerAggregator.COMPRESS_MARKER,
0, 1)) {
// This reads the uncompressed length from the front of the compressed input
- int uncompressedLength = Snappy.getUncompressedLength(ptr.get(), ptr.getOffset() + 1);
+ int uncompressedLength = Snappy.uncompressedLength(ptr.get(), ptr.getOffset() + 1, ptr.getLength() - 1);
byte[] uncompressed = new byte[uncompressedLength];
// This will throw CorruptionException, a RuntimeException if the snappy data is invalid.
// We're making a RuntimeException out of a checked IOException below so assume it's ok
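// Illustrative sketch, not part of the patch: the xerial API takes an explicit length and
// declares a checked IOException, unlike iq80's getUncompressedLength and its unchecked
// CorruptionException. A minimal decompress helper following the pattern above might look
// like this (the helper name and the RuntimeException wrapping are assumptions, not Phoenix code).
import java.io.IOException;
import org.xerial.snappy.Snappy;

final class SnappyDecode {
    private SnappyDecode() {}

    /** Decompresses buf[offset, offset + length) and returns a newly allocated array. */
    static byte[] uncompress(byte[] buf, int offset, int length) {
        try {
            // Reads the uncompressed size stored at the front of the compressed block.
            int uncompressedLength = Snappy.uncompressedLength(buf, offset, length);
            byte[] uncompressed = new byte[uncompressedLength];
            Snappy.uncompress(buf, offset, length, uncompressed, 0);
            return uncompressed;
        } catch (IOException e) {
            // Mirrors the aggregator's approach of surfacing a RuntimeException.
            throw new RuntimeException(e);
        }
    }
}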
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountServerAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountServerAggregator.java
index af649004b74..9b722a63d7b 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountServerAggregator.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountServerAggregator.java
@@ -17,6 +17,7 @@
*/
package org.apache.phoenix.expression.aggregator;
+import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
@@ -33,7 +34,7 @@
import org.apache.phoenix.schema.types.PVarbinary;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.SizedUtil;
-import org.iq80.snappy.Snappy;
+import org.xerial.snappy.Snappy;
/**
* Server side Aggregator which will aggregate data and find distinct values with number of occurrences for each.
@@ -108,8 +109,12 @@ public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
// The size for the map serialization is above the threshold. We will do the Snappy compression here.
byte[] compressed = new byte[COMPRESS_MARKER.length + Snappy.maxCompressedLength(buffer.length)];
System.arraycopy(COMPRESS_MARKER, 0, compressed, 0, COMPRESS_MARKER.length);
- int compressedLen = Snappy.compress(buffer, 1, buffer.length - 1, compressed, COMPRESS_MARKER.length);
- ptr.set(compressed, 0, compressedLen + 1);
+ try {
+ int compressedLen = Snappy.compress(buffer, 1, buffer.length - 1, compressed, COMPRESS_MARKER.length);
+ ptr.set(compressed, 0, compressedLen + 1);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
return true;
}
ptr.set(buffer, 0, offset);
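// Illustrative sketch, not part of the patch: a test-style round trip of the framing used
// above (a 1-byte marker followed by a snappy block). The marker value and buffer handling
// here are simplified assumptions for illustration only.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.xerial.snappy.Snappy;

final class MarkerFramingCheck {
    private static final byte MARKER = 1; // stand-in for COMPRESS_MARKER[0]

    static byte[] frame(byte[] payload) throws IOException {
        // maxCompressedLength() gives a safe upper bound for the output buffer.
        byte[] out = new byte[1 + Snappy.maxCompressedLength(payload.length)];
        out[0] = MARKER;
        // compress() returns the actual number of compressed bytes written after the marker.
        int compressedLen = Snappy.compress(payload, 0, payload.length, out, 1);
        return java.util.Arrays.copyOf(out, 1 + compressedLen);
    }

    public static void main(String[] args) throws IOException {
        byte[] framed = frame("distinct-value payload".getBytes(StandardCharsets.UTF_8));
        // The first byte is the marker; the rest is a regular snappy block.
        byte[] block = java.util.Arrays.copyOfRange(framed, 1, framed.length);
        System.out.println(new String(Snappy.uncompress(block), StandardCharsets.UTF_8));
    }
}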
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheClient.java b/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheClient.java
index 9a5fb6eb798..07b089814e8 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheClient.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheClient.java
@@ -44,8 +44,7 @@
import org.apache.phoenix.util.ClientUtil;
import org.apache.phoenix.util.TrustedByteArrayOutputStream;
import org.apache.phoenix.util.TupleUtil;
-import org.iq80.snappy.Snappy;
-
+import org.xerial.snappy.Snappy;
import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
/**
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
index c36c6ecd869..3c92c9e45cf 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
@@ -51,8 +51,7 @@
import org.apache.phoenix.util.ResultUtil;
import org.apache.phoenix.util.SizedUtil;
import org.apache.phoenix.util.TupleUtil;
-import org.iq80.snappy.CorruptionException;
-import org.iq80.snappy.Snappy;
+import org.xerial.snappy.Snappy;
public class HashCacheFactory implements ServerCacheFactory {
@@ -71,12 +70,11 @@ public void write(DataOutput output) throws IOException {
public Closeable newCache(ImmutableBytesWritable cachePtr, byte[] txState, MemoryChunk chunk, boolean useProtoForIndexMaintainer, int clientVersion) throws SQLException {
try {
// This reads the uncompressed length from the front of the compressed input
- int uncompressedLen = Snappy.getUncompressedLength(cachePtr.get(), cachePtr.getOffset());
+ int uncompressedLen = Snappy.uncompressedLength(cachePtr.get(), cachePtr.getOffset(), cachePtr.getLength());
byte[] uncompressed = new byte[uncompressedLen];
- Snappy.uncompress(cachePtr.get(), cachePtr.getOffset(), cachePtr.getLength(),
- uncompressed, 0);
+ Snappy.uncompress(cachePtr.get(), cachePtr.getOffset(), cachePtr.getLength(), uncompressed, 0);
return new HashCacheImpl(uncompressed, chunk, clientVersion);
- } catch (CorruptionException e) {
+ } catch (IOException e) {
throw ClientUtil.parseServerException(e);
}
}
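// Illustrative sketch, not part of the patch: xerial snappy reports corrupt or truncated
// input through a checked IOException (iq80 threw its unchecked CorruptionException), which
// is why the catch block above changes. A hypothetical helper showing the same translation
// pattern (the names here are made up, only ClientUtil.parseServerException is Phoenix's):
import java.io.IOException;
import java.sql.SQLException;
import org.xerial.snappy.Snappy;

final class SnappyCacheCodec {
    static byte[] uncompressOrSqlException(byte[] buf, int offset, int length) throws SQLException {
        try {
            byte[] out = new byte[Snappy.uncompressedLength(buf, offset, length)];
            Snappy.uncompress(buf, offset, length, out, 0);
            return out;
        } catch (IOException e) {
            // Stand-in for ClientUtil.parseServerException(e); bad snappy data now
            // arrives here as a checked exception instead of a RuntimeException.
            throw new SQLException("Unable to decompress hash cache", e);
        }
    }
}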
diff --git a/phoenix-mapreduce-byo-shaded-hbase/pom.xml b/phoenix-mapreduce-byo-shaded-hbase/pom.xml
index 7a20de505b9..47d14a13344 100644
--- a/phoenix-mapreduce-byo-shaded-hbase/pom.xml
+++ b/phoenix-mapreduce-byo-shaded-hbase/pom.xml
@@ -175,7 +175,10 @@
-                    <exclude>org/apache/commons/configuration2/**</exclude>
+                    <exclude>org/apache/commons/configuration2/**</exclude>
+
+                    <exclude>org/xerial/snappy/**</exclude>
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index e6eb646633e..aeea3649b78 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -178,7 +178,10 @@
-                    <exclude>org/apache/commons/configuration2/**</exclude>
+                    <exclude>org/apache/commons/configuration2/**</exclude>
+
+                    <exclude>org/xerial/snappy/**</exclude>