 - AI on AWS pointers
 - EC2 working environment build with Q Developer on VS Code Server
-- Example Python Streamlit AI playground application
+- Example Python Streamlit application: uses four models, two AWS-hosted and two from Hugging Face
@@ -181,7 +181,7 @@ G.add_edge('A', 'B')
 print(f"NetworkX working: {list(G.edges())}")
 ```

-## Example Python Streamlit AI playground application
+## Example Python Streamlit application: uses four models, two AWS-hosted and two from Hugging Face


 ```
@@ -194,8 +194,8 @@ from PIL import Image
 from transformers import GPT2LMHeadModel, GPT2Tokenizer, DistilBertTokenizer, DistilBertForSequenceClassification
 import torch

-st.title("Building with Bedrock") # Title of the application
-st.subheader("Model Playground ")
+st.title("AWS Bedrock Web App") # Title of the application
+st.subheader("Four models available")

 # Turn base64 string to image with PIL
 def base64_to_pil(base64_string):
@@ -220,7 +220,7 @@ bedrock_runtime = boto3.client(
 )


-# Bedrock api call to stable diffusion
+# Bedrock API call to the Stable Image Core text-to-image model
 def generate_image_sd(text):
     """
     Purpose:
@@ -235,20 +235,17 @@ def generate_image_sd(text):
         "output_format": "jpeg",
         "seed": 0,
     }
-
     body = json.dumps(body)
-
     modelId = "stability.stable-image-core-v1:1"
-
     response = bedrock_runtime.invoke_model(
         body=body,
         modelId=modelId
     )
     response_body = json.loads(response["body"].read().decode("utf-8"))
-
     results = response_body["images"][0]
     return results

+# Nova is an AWS-hosted text completion model
 def call_nova(
     system_prompt: str,
     prompt: str,
@@ -268,16 +265,13 @@ def call_nova(
         ],
     }
     body = json.dumps(prompt_config)
-
     modelId = model_id
     accept = "application/json"
     contentType = "application/json"
-
     response = bedrock_runtime.invoke_model(
         body=body, modelId=modelId, accept=accept, contentType=contentType
     )
     response_body = json.loads(response.get("body").read())
-
     results = response_body["output"]["message"]["content"][0].get("text")
     return results

@@ -314,7 +308,6 @@ models = ["Stable Image Core", "Amazon Nova Pro", "GPT2", "DistilBERT"]

 current_model = st.selectbox("Select Model", models)

-
 if current_model == "Stable Image Core":
     # text input
     prompt = st.text_area("Enter prompt")
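
A minimal sketch of how the "Stable Image Core" branch might continue after the prompt input, assuming the generate_image_sd and base64_to_pil helpers shown above and a hypothetical "Generate image" button label:

```
# Illustrative sketch only: send the prompt to Stable Image Core,
# decode the base64 JPEG it returns, and render it in the Streamlit page.
if st.button("Generate image"):  # hypothetical button label
    image_b64 = generate_image_sd(prompt)   # base64-encoded JPEG string
    st.image(base64_to_pil(image_b64))      # decode with PIL and display
```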