
Commit b081d16

Update text2sql.py according to linter
1 parent 1098331 · commit b081d16

1 file changed: +9 −14 lines changed

llama-index-integrations/llms/llama-index-llms-ipex-llm/examples/text2sql.py (+9 −14)
@@ -48,13 +48,12 @@ def define_sql_database(engine, city_stats_table):
 
 def main(args):
     engine, city_stats_table = create_database_schema()
-
+
     sql_database = define_sql_database(engine, city_stats_table)
-
+
     model_id = args.embedding_model_path
     device_map = args.device
 
-
     embed_model = IpexLLMEmbedding(model_id, device=device_map)
 
     llm = IpexLLM.from_model_id(
@@ -64,7 +63,7 @@ def main(args):
         max_new_tokens=args.n_predict,
         generate_kwargs={"temperature": 0.7, "do_sample": False},
         model_kwargs={},
-        device_map=device_map
+        device_map=device_map,
     )
 
 
@@ -74,7 +73,7 @@ def main(args):
         tables=["city_stats"],
         llm=llm,
         embed_model=embed_model,
-        return_raw=True
+        return_raw=True,
     )
 
     query_engine = RetrieverQueryEngine.from_args(nl_sql_retriever, llm=llm)
@@ -90,35 +89,31 @@ def main(args):
         "--model-path",
         type=str,
         required=True,
-        help="the path to transformers model"
+        help="the path to transformers model",
     )
     parser.add_argument(
         "--device",
         "-d",
         type=str,
         default="cpu",
         choices=["cpu", "xpu"],
-        help="The device (Intel CPU or Intel GPU) the LLM model runs on",
+        help="The device (Intel CPU or Intel GPU) the LLM model runs on"
     )
     parser.add_argument(
         "-q",
         "--question",
         type=str,
         default="Which city has the highest population?",
-        help="question you want to ask."
+        help="question you want to ask.",
    )
     parser.add_argument(
         "-e",
         "--embedding-model-path",
         default="BAAI/bge-small-en",
-        help="the path to embedding model path"
+        help="the path to embedding model path",
     )
     parser.add_argument(
-        "-n",
-        "--n-predict",
-        type=int,
-        default=32,
-        help="max number of predict tokens"
+        "-n", "--n-predict", type=int, default=32, help="max number of predict tokens"
     )
     args = parser.parse_args()
 
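For readers following along, here is a minimal sketch of how the pieces touched by this commit fit together once the file is reformatted. Only the lines inside the hunks above are taken from the diff; the imports, the create_database_schema / define_sql_database helper bodies, the model_name/tokenizer_name keywords, and the final query call are assumptions filled in from the usual llama-index text-to-SQL setup, not necessarily what text2sql.py contains verbatim.

# Sketch of the post-commit flow in text2sql.py. Hunk-covered lines are kept as-is;
# everything else (imports, helper bodies, model_name/tokenizer_name, the query call)
# is an assumption based on the standard llama-index text-to-SQL example.
import argparse

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine
from llama_index.core import SQLDatabase
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import NLSQLRetriever
from llama_index.embeddings.ipex_llm import IpexLLMEmbedding
from llama_index.llms.ipex_llm import IpexLLM


def create_database_schema():
    # Assumed helper: in-memory SQLite database with a single city_stats table.
    engine = create_engine("sqlite:///:memory:")
    metadata = MetaData()
    city_stats_table = Table(
        "city_stats",
        metadata,
        Column("city_name", String(16), primary_key=True),
        Column("population", Integer),
        Column("country", String(16)),
    )
    metadata.create_all(engine)
    return engine, city_stats_table


def define_sql_database(engine, city_stats_table):
    # Assumed helper: wrap the engine so llama-index can introspect city_stats.
    return SQLDatabase(engine, include_tables=["city_stats"])


def main(args):
    engine, city_stats_table = create_database_schema()
    sql_database = define_sql_database(engine, city_stats_table)

    # Embedding model and LLM both run on the device chosen via --device (cpu or xpu).
    embed_model = IpexLLMEmbedding(args.embedding_model_path, device=args.device)
    llm = IpexLLM.from_model_id(
        model_name=args.model_path,      # assumed: the hunk only shows the trailing kwargs
        tokenizer_name=args.model_path,  # assumed
        max_new_tokens=args.n_predict,
        generate_kwargs={"temperature": 0.7, "do_sample": False},
        model_kwargs={},
        device_map=args.device,
    )

    # Natural-language-to-SQL retrieval over city_stats, wrapped in a query engine.
    nl_sql_retriever = NLSQLRetriever(
        sql_database,
        tables=["city_stats"],
        llm=llm,
        embed_model=embed_model,
        return_raw=True,
    )
    query_engine = RetrieverQueryEngine.from_args(nl_sql_retriever, llm=llm)
    print(query_engine.query(args.question))  # assumed output handling

Note that the commit only touches formatting: --model-path stays required, --device still defaults to cpu with cpu/xpu as the choices, and -q/--question, -e/--embedding-model-path, and -n/--n-predict keep their previous defaults.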
