@@ -288,18 +288,22 @@ def _dropTableIfExists(self, schema_table_name):
         print(f'Droping table {schema_table_name}')
         self.dropTable(schema_table_name)

-    def createTable(self, schema_table_name, dataframe, keys=None, create_table_options=None, to_upper=False):
+    def createTable(self, schema_table_name, dataframe, primary_keys=None, create_table_options=None, to_upper=False, drop_table=False):
         """
         Creates a schema.table from a dataframe
         :param schema_table_name: str The schema.table to create
         :param dataframe: The Spark DataFrame to base the table off
-        :param keys: List[str] the primary keys. Default None
+        :param primary_keys: List[str] the primary keys. Default None
         :param create_table_options: str The additional table-level SQL options default None
         :param to_upper: bool If the dataframe columns should be converted to uppercase before table creation
             If False, the table will be created with lower case columns. Default False
+        :param drop_table: bool whether to drop the table if it exists. Default False. If False and the table exists,
+            the function will throw an exception.
         """
         if to_upper:
             dataframe = self.toUpper(dataframe)
+        if drop_table:
+            self._dropTableIfExists(schema_table_name)
         # Need to convert List (keys) to scala seq
-        keys_seq = self.jvm.PythonUtils.toSeq(keys)
+        keys_seq = self.jvm.PythonUtils.toSeq(primary_keys)
         self.context.createTable(schema_table_name, dataframe._jdf.schema(), keys_seq, create_table_options)
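For context, a minimal usage sketch of the updated signature is below. It is not part of the change itself: it assumes `splice` is an instance of the context class being patched (e.g. a Splice Machine Spark context) and `spark` is an already active SparkSession; the table and column names are hypothetical.

    # Hypothetical sample DataFrame (assumes `spark` is an active SparkSession).
    df = spark.createDataFrame(
        [(1, 'alice'), (2, 'bob')],
        ['id', 'name'],
    )

    # Create SPLICE.USERS from the DataFrame schema with `id` as the primary key.
    # With drop_table=True an existing SPLICE.USERS is dropped first; with the
    # default drop_table=False the call raises if the table already exists.
    splice.createTable(
        'SPLICE.USERS',
        df,
        primary_keys=['id'],
        to_upper=True,
        drop_table=True,
    )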