@@ -100,7 +100,9 @@ private static class PrimitiveCase {
     }
   }

-  // Convert a string to a Decimal that can be written using Avro.
+  /**
+   * Convert a string to a Decimal that can be written using Avro.
+   */
   private static Object avroDecimalValue(String s) {
     BigDecimal v = new BigDecimal(s);
     int precision = v.precision();
@@ -1847,8 +1849,7 @@ public void testArrayWithElementValueTypedValueConflict() {
    * This is a custom Parquet writer builder that injects a specific Parquet schema and then uses
    * the Avro object model. This ensures that the Parquet file's schema is exactly what was passed.
    */
-  private static class TestWriterBuilder
-      extends ParquetWriter.Builder<GenericRecord, TestWriterBuilder> {
+  private static class TestWriterBuilder extends ParquetWriter.Builder<GenericRecord, TestWriterBuilder> {
     private TestSchema schema = null;

     protected TestWriterBuilder(Path path) {
@@ -1867,20 +1868,21 @@ protected TestWriterBuilder self() {

     @Override
     protected WriteSupport<GenericRecord> getWriteSupport(Configuration conf) {
-      return new AvroWriteSupport<>(schema.parquetSchema, avroSchema(schema.unannotatedParquetSchema), GenericData.get());
+      return new AvroWriteSupport<>(
+          schema.parquetSchema,
+          avroSchema(schema.unannotatedParquetSchema),
+          GenericData.get());
     }
   }

-  GenericRecord writeAndRead(TestSchema testSchema, GenericRecord record)
-      throws IOException {
+  GenericRecord writeAndRead(TestSchema testSchema, GenericRecord record) throws IOException {
     List<GenericRecord> result = writeAndRead(testSchema, Arrays.asList(record));
     assert (result.size() == 1);
     return result.get(0);
   }

-  List<GenericRecord> writeAndRead(TestSchema testSchema, List<GenericRecord> records)
-      throws IOException {
-    // Copied from TestSpecificReadWrite.java. Why does it do these weird things?
+  List<GenericRecord> writeAndRead(TestSchema testSchema, List<GenericRecord> records) throws IOException {
+    // Create a temporary file for testing
     File tmp = File.createTempFile(getClass().getSimpleName(), ".tmp");
     tmp.deleteOnExit();
     tmp.delete();
@@ -1894,9 +1896,9 @@ List<GenericRecord> writeAndRead(TestSchema testSchema, List<GenericRecord> reco
     }

     Configuration conf = new Configuration();
-    // We need to set an explicit read schema, because Avro wrote the shredding schema as the Avro schema in the
-    // write, and it will use that by default. If we write using a proper shredding writer, the Avro schema
-    // should just contain a <metadata, value> record, and we won't need this.
+    // We need to set an explicit read schema because Avro wrote the shredding schema as the Avro
+    // schema in the write, and it will use that by default. If we write using a proper shredding
+    // writer, the Avro schema should just contain a <metadata, value> record, and we won't need this.
     AvroReadSupport.setAvroReadSchema(conf, avroSchema(testSchema.parquetSchema));
     AvroParquetReader<GenericRecord> reader = new AvroParquetReader(conf, path);

@@ -2124,7 +2126,9 @@ private static void checkShreddedType(Type shreddedType) {
         shreddedType.getRepetition());
   }

-  // Check for the given excpetion with message, possibly wrapped by a ParquetDecodingException
+  /**
+   * Check for the given exception with message, possibly wrapped by a ParquetDecodingException.
+   */
   void assertThrows(Callable callable, Class<? extends Exception> exception, String msg) {
     try {
       callable.call();
@@ -2145,12 +2149,15 @@ void assertThrows(Callable callable, Class<? extends Exception> exception, Strin
     }
   }

-  // Assert that metadata contains identical bytes to expected, and value is logically equivalent.
-  // E.g. object fields may be ordered differently in the binary.
+  /**
+   * Assert that metadata contains identical bytes to expected, and value is logically equivalent.
+   * E.g. object fields may be ordered differently in the binary.
+   */
   void assertEquivalent(ByteBuffer expectedMetadata, ByteBuffer expectedValue, GenericRecord actual) {
     assertEquals(expectedMetadata, (ByteBuffer) actual.get("metadata"));
     assertEquals(expectedMetadata, (ByteBuffer) actual.get("metadata"));
-    assertEquivalent(new Variant(expectedValue, expectedMetadata),
+    assertEquivalent(
+        new Variant(expectedValue, expectedMetadata),
         new Variant(((ByteBuffer) actual.get("value")), expectedMetadata));
   }

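As a point of reference, the writeAndRead helpers in this diff boil down to a write-then-read-back round trip over a temporary Parquet file. Below is a minimal sketch of that pattern using the public AvroParquetWriter/AvroParquetReader builders instead of the test's custom TestWriterBuilder; the class name, toy <metadata, value> schema, and byte values are illustrative assumptions, not code from this commit.

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.avro.AvroParquetReader;
import org.apache.parquet.avro.AvroParquetWriter;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.hadoop.ParquetWriter;

public class RoundTripSketch {
  public static void main(String[] args) throws IOException {
    // Toy Avro schema standing in for the <metadata, value> shredding schema used by the tests.
    Schema schema = new Schema.Parser().parse("{\"type\":\"record\",\"name\":\"VariantLike\",\"fields\":["
        + "{\"name\":\"metadata\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"}]}");

    // Same temp-file dance as the test: create, mark for deletion, delete so Parquet can create it.
    File tmp = File.createTempFile("round-trip", ".parquet");
    tmp.deleteOnExit();
    tmp.delete();
    Path path = new Path(tmp.getPath());

    GenericRecord record = new GenericData.Record(schema);
    record.put("metadata", ByteBuffer.wrap(new byte[] {1}));
    record.put("value", ByteBuffer.wrap(new byte[] {12}));

    // Write with the Avro object model.
    try (ParquetWriter<GenericRecord> writer = AvroParquetWriter.<GenericRecord>builder(path)
        .withSchema(schema)
        .withConf(new Configuration())
        .build()) {
      writer.write(record);
    }

    // Read everything back with the same schema.
    List<GenericRecord> result = new ArrayList<>();
    try (ParquetReader<GenericRecord> reader = AvroParquetReader.<GenericRecord>builder(path)
        .withConf(new Configuration())
        .build()) {
      GenericRecord read;
      while ((read = reader.read()) != null) {
        result.add(read);
      }
    }
    assert result.size() == 1;
  }
}

The test's TestWriterBuilder differs from this sketch mainly in that it forces a specific Parquet schema through AvroWriteSupport rather than deriving one from the Avro schema, which is exactly what its javadoc above describes.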