-
Notifications
You must be signed in to change notification settings - Fork 15
Expand file tree
/
Copy pathapp.py
More file actions
49 lines (39 loc) · 1.33 KB
/
app.py
File metadata and controls
49 lines (39 loc) · 1.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
from flask import Flask, request, send_file, jsonify, render_template, send_from_directory
from flask_cors import CORS
import io
import torch
from torch import autocast
from diffusers import StableDiffusionPipeline
# Flask app that also serves the built React frontend as static files.
app = Flask(__name__, static_url_path='', static_folder='frontend/build')
CORS(app)

# Fail fast with an explicit error instead of `assert`: asserts are stripped
# when Python runs with -O, which would let the server start without a GPU
# and crash later on the first request.
if not torch.cuda.is_available():
    raise RuntimeError("A CUDA-capable GPU is required to run this server")

# Load the Stable Diffusion weights once at startup (requires a configured
# Hugging Face auth token) and move the pipeline onto the GPU.
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    use_auth_token=True
).to("cuda")
def run_inference(prompt):
    """Run the diffusion pipeline on *prompt* and return the PNG as an
    in-memory, rewound BytesIO buffer ready for `send_file`."""
    with autocast("cuda"):
        generated = pipe(prompt)["sample"][0]
    buffer = io.BytesIO()
    generated.save(buffer, "PNG")
    buffer.seek(0)
    return buffer
@app.route('/')
def myapp():
    """GET /?prompt=<text> -> generated PNG image, or 400 if absent."""
    prompt = request.args.get("prompt")
    if prompt is None:
        return "Please specify a prompt parameter", 400
    return send_file(run_inference(prompt), mimetype='image/png')
# Serve the single-page frontend. The `defaults={'path': ''}` only makes
# sense together with a parameterized rule; the original registered just
# `/app`, so client-side routes under `/app/...` returned 404. Both rules
# now resolve to the SPA's index.html, which handles routing in the browser.
@app.route('/app', defaults={'path': ''})
@app.route('/app/<path:path>')
def serve(path):
    return send_from_directory(app.static_folder, 'index.html')
@app.route('/app/inference', methods=['POST'])
def handleAppInference():
    """POST /app/inference with JSON body {"prompt": <text>} -> PNG image.

    Validates the body explicitly: a missing/non-JSON body previously made
    `data["prompt"]` raise TypeError/KeyError and return a 500; now it
    returns a 400 with the same message the GET route uses.
    """
    data = request.get_json(silent=True)  # silent=True: None instead of raising on bad JSON
    if not data or "prompt" not in data:
        return "Please specify a prompt parameter", 400
    img_data = run_inference(data["prompt"])
    return send_file(img_data, mimetype='image/png')
if __name__ == "__main__":
    # NOTE(review): threaded=False presumably serializes requests so GPU
    # inference never runs concurrently — confirm against deployment needs.
    app.run(host='0.0.0.0', port=3000, threaded=False)