Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

set proxy 并没有在SocketServer.py脚本的参数中体现 #69

Open
wants to merge 18 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Binary file added .DS_Store
Binary file not shown.
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
/.history/*
/TTS/models/*
/ASR/resources/models/*
/SentimentEngine/models/*
Binary file added ASR/__pycache__/ASRService.cpython-311.pyc
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
97 changes: 97 additions & 0 deletions GPT/GPTService-openai.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
import logging
import os
import time

import GPT.machine_id
import GPT.tune as tune


class GPTService():
    """Unified chat front-end over the two revChatGPT client flavours.

    chatVer == 1 uses the reverse-engineered web client (revChatGPT.V1),
    authenticated with an access token or email/password; chatVer == 3 uses
    the official API client (revChatGPT.V3) with an API key (or a
    machine-id-derived key routed through a rate-limited community proxy).
    """

    def __init__(self, args):
        """Build the underlying chatbot from command-line ``args``.

        Reads args.chatVer, args.character, args.model, args.brainwash,
        plus the credential/proxy fields relevant to the chosen version.
        Raises ValueError for an unsupported chatVer.
        """
        logging.info('Initializing ChatGPT Service...')
        self.chatVer = args.chatVer

        # Character "tune": the persona prompt injected into conversations.
        self.tune = tune.get_tune(args.character, args.model)

        # Counts ask_stream() calls; every 5th V1 call re-injects the tune.
        self.counter = 0

        self.brainwash = args.brainwash

        if self.chatVer == 1:
            from revChatGPT.V1 import Chatbot
            config = {}
            if args.accessToken:
                logging.info('Try to login with access token.')
                config['access_token'] = args.accessToken
            else:
                logging.info('Try to login with email and password.')
                config['email'] = args.email
                config['password'] = args.password
            config['paid'] = args.paid
            config['model'] = args.model
            if isinstance(args.proxy, str):
                config['proxy'] = args.proxy

            self.chatbot = Chatbot(config=config)
            logging.info('WEB Chatbot initialized.')

        elif self.chatVer == 3:
            mach_id = GPT.machine_id.get_machine_unique_identifier()
            from revChatGPT.V3 import Chatbot
            if args.APIKey:
                logging.info('you have your own api key. Great.')
                api_key = args.APIKey
            else:
                logging.info('using custom API proxy, with rate limit.')
                os.environ['API_URL'] = "https://api.geekerwan.net/chatgpt2"
                api_key = mach_id

            self.chatbot = Chatbot(api_key=api_key, proxy=args.proxy, system_prompt=self.tune)
            logging.info('API Chatbot initialized.')

        else:
            # Fail fast instead of leaving self.chatbot unset, which would
            # otherwise surface later as an AttributeError in ask()/ask_stream().
            raise ValueError('Unsupported chatVer: %r (expected 1 or 3)' % self.chatVer)

    def ask(self, text):
        """Send *text* and return the complete reply as a single string."""
        stime = time.time()
        prev_text = ''  # defensive default in case the client yields nothing
        if self.chatVer == 3:
            prev_text = self.chatbot.ask(text)
        # V1 streams cumulative partial messages; keep only the final one.
        elif self.chatVer == 1:
            for data in self.chatbot.ask(
                self.tune + '\n' + text
            ):
                prev_text = data["message"]

        logging.info('ChatGPT Response: %s, time used %.2f' % (prev_text, time.time() - stime))
        return prev_text

    def ask_stream(self, text):
        """Send *text* and yield the reply sentence-by-sentence.

        A chunk is flushed whenever a sentence delimiter arrives and at
        least 4 characters have accumulated; any non-empty remainder is
        flushed after the stream ends.
        """
        prev_text = ""
        complete_text = ""
        stime = time.time()
        # Every 5th V1 call, re-send the persona tune so the model stays in
        # character ("brainwash" mode just logs the reinforcement differently).
        if self.counter % 5 == 0 and self.chatVer == 1:
            if self.brainwash:
                logging.info('Brainwash mode activated, reinforce the tune.')
            else:
                logging.info('Injecting tunes')
            asktext = self.tune + '\n' + text
        else:
            asktext = text
        self.counter += 1
        for data in self.chatbot.ask(asktext) if self.chatVer == 1 else self.chatbot.ask_stream(text):
            # V1 yields cumulative messages: take only the newly-added suffix.
            message = data["message"][len(prev_text):] if self.chatVer == 1 else data

            if ("。" in message or "!" in message or "?" in message or "\n" in message) and len(complete_text) > 3:
                complete_text += message
                logging.info('ChatGPT Stream Response: %s, @Time %.2f' % (complete_text, time.time() - stime))
                yield complete_text.strip()
                complete_text = ""
            else:
                complete_text += message

            prev_text = data["message"] if self.chatVer == 1 else data

        # Flush whatever is left once the stream is exhausted.
        if complete_text.strip():
            logging.info('ChatGPT Stream Response: %s, @Time %.2f' % (complete_text, time.time() - stime))
            yield complete_text.strip()
41 changes: 25 additions & 16 deletions GPT/GPTService.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,28 @@
import logging
import os
import time

import GPT.machine_id
import GPT.tune as tune

import requests
import re

class GPTService():
#替换一些听起来不太顺畅的文字
def _replace_str(self, desstr):
desstr = desstr.replace("!是派蒙!", "!我是派蒙!")
desstr = desstr.replace("Paimon", "派蒙")
desstr = desstr.replace("这是派蒙", "我是派蒙")
return desstr

#过滤特殊字符, 过滤除中英文, 数字,半角(, .,?:"!),全角(,。?:“!)以外的其他字符,用于防止颜文字
def _filter_str(self, desstr):
return ''.join(re.findall(u'[\u4e00-\u9fa5a-zA-Z0-9\u002E\u002C\u003B\u003A\u0022\u0021\u0020\u003F\u0027\uFF0C\u3002\uFF1F\uFF1A\u201C\uFF01]', desstr))

def __init__(self, args):
logging.info('Initializing ChatGPT Service...')
self.chatVer = args.chatVer

self.tune = tune.get_tune(args.character, args.model)

self.counter = 0

self.brainwash = args.brainwash

if self.chatVer == 1:
Expand All @@ -23,7 +31,6 @@ def __init__(self, args):
if args.accessToken:
logging.info('Try to login with access token.')
config['access_token'] = args.accessToken

else:
logging.info('Try to login with email and password.')
config['email'] = args.email
Expand All @@ -36,34 +43,37 @@ def __init__(self, args):
self.chatbot = Chatbot(config=config)
logging.info('WEB Chatbot initialized.')


elif self.chatVer == 3:
mach_id = GPT.machine_id.get_machine_unique_identifier()
from revChatGPT.V3 import Chatbot
if args.APIKey:
logging.info('you have your own api key. Great.')
api_key = args.APIKey
else:
logging.info('using custom API proxy, with rate limit.')
os.environ['API_URL'] = "https://api.geekerwan.net/chatgpt2"
logging.info('using custom API proxy, with rate limit.')
#os.environ['API_URL'] = "http://localhost:8000/v1/chat/completions"
os.environ['API_URL'] = "http://75.63.212.152:41841/v1/"
api_key = mach_id

self.chatbot = Chatbot(api_key=api_key, proxy=args.proxy, system_prompt=self.tune)
self.chatbot = Chatbot(engine=args.model, api_key=api_key, proxy=args.proxy, system_prompt=self.tune, max_tokens=16000)
logging.info('API Chatbot initialized.')

def ask(self, text):
print("--->ask text:" + text)
stime = time.time()
if self.chatVer == 3:
prev_text = self.chatbot.ask(text)

# V1
elif self.chatVer == 1:
for data in self.chatbot.ask(
self.tune + '\n' + text
):
prev_text = data["message"]

logging.info('ChatGPT Response: %s, time used %.2f' % (prev_text, time.time() - stime))
#logging.info('ChatGPT Response: %s, time used %.2f' % (prev_text, time.time() - stime))
print("--->response:" + prev_text)
prev_text = self._filter_str(prev_text)
print("--->filterred:" + prev_text)
prev_text = self._replace_str(prev_text)
print("--->replaced:" + prev_text)
return prev_text

def ask_stream(self, text):
Expand All @@ -81,7 +91,6 @@ def ask_stream(self, text):
self.counter += 1
for data in self.chatbot.ask(asktext) if self.chatVer == 1 else self.chatbot.ask_stream(text):
message = data["message"][len(prev_text):] if self.chatVer == 1 else data

if ("。" in message or "!" in message or "?" in message or "\n" in message) and len(complete_text) > 3:
complete_text += message
logging.info('ChatGPT Stream Response: %s, @Time %.2f' % (complete_text, time.time() - stime))
Expand All @@ -94,4 +103,4 @@ def ask_stream(self, text):

if complete_text.strip():
logging.info('ChatGPT Stream Response: %s, @Time %.2f' % (complete_text, time.time() - stime))
yield complete_text.strip()
yield complete_text.strip()
Binary file added GPT/__pycache__/GPTService.cpython-311.pyc
Binary file not shown.
Binary file added GPT/__pycache__/machine_id.cpython-311.pyc
Binary file not shown.
Binary file added GPT/__pycache__/tune.cpython-311.pyc
Binary file not shown.
1 change: 1 addition & 0 deletions GPT/machine_id.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import uuid

def get_machine_unique_identifier():
mac_address = None
if platform.system() == "Windows":
# Use the Windows Management Instrumentation (WMI) interface
import wmi
Expand Down
Binary file not shown.
Binary file added SentimentEngine/__pycache__/__init__.cpython-311.pyc
Binary file not shown.
4 changes: 2 additions & 2 deletions SocketServer.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,10 +62,10 @@ def __init__(self, args):
self.addr = None
self.conn = None
logging.info('Initializing Server...')
self.host = socket.gethostbyname(socket.gethostname())
self.host = "0.0.0.0"# 所有地址上面监听
self.port = 38438
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 10240000)
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024000)
self.s.bind((self.host, self.port))
self.tmp_recv_file = 'tmp/server_received.wav'
self.tmp_proc_file = 'tmp/server_processed.wav'
Expand Down
76 changes: 76 additions & 0 deletions TTS-flask-paimon.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
from flask import Flask, request, send_file
from pydub import AudioSegment
import sys
import time
import os
import torch
import logging
import soundfile

sys.path.append('TTS/vits')
import TTS.vits.commons as commons
import TTS.vits.utils as utils

from TTS.vits.models import SynthesizerTrn
from TTS.vits.text.symbols import symbols
from TTS.vits.text import text_to_sequence

# Flask application serving the TTS endpoint defined below.
app = Flask(__name__)

# Configure root-logger verbosity for the whole process.
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)

# VITS hparams JSON and checkpoint for the "paimon" voice.
# NOTE(review): paths are relative to the working directory — run from repo root.
cfg_path = "TTS/models/paimon6k.json"
model_path = "TTS/models/paimon6k_390k.pth"

def get_text(text, hps):
    """Convert raw text into a LongTensor of symbol ids.

    Uses the text cleaners configured in *hps*; optionally intersperses
    blank (0) tokens when hps.data.add_blank is set.
    """
    seq = text_to_sequence(text, hps.data.text_cleaners)
    if hps.data.add_blank:
        seq = commons.intersperse(seq, 0)
    return torch.LongTensor(seq)


class TTService():
    """Text-to-speech service backed by a pre-trained VITS model on CUDA."""

    def __init__(self, cfg, model, char, speed):
        # cfg: hparams JSON path; model: checkpoint path; char: voice name
        # (logging only); speed: length_scale passed to the synthesizer.
        logging.info('Initializing TTS Service for %s...' % char)
        self.hps = utils.get_hparams_from_file(cfg)
        self.speed = speed
        self.net_g = SynthesizerTrn(
            len(symbols),
            self.hps.data.filter_length // 2 + 1,
            self.hps.train.segment_size // self.hps.data.hop_length,
            **self.hps.model).cuda()
        self.net_g.eval()
        utils.load_checkpoint(model, self.net_g, None)

    def read(self, text):
        """Synthesize *text* and return the waveform as a float32 numpy array."""
        phoneme_ids = get_text(text.replace('~', '!'), self.hps)
        with torch.no_grad():
            batch = phoneme_ids.cuda().unsqueeze(0)
            lengths = torch.LongTensor([phoneme_ids.size(0)]).cuda()
            result = self.net_g.infer(batch, lengths, noise_scale=.667,
                                      noise_scale_w=0.2, length_scale=self.speed)
            audio = result[0][0, 0].data.cpu().float().numpy()
        return audio

    def read_save(self, text, filename, sr):
        """Synthesize *text* and write it to *filename* at sample rate *sr*."""
        start = time.time()
        soundfile.write(filename, self.read(text), sr)
        logging.info('VITS Synth Done, time used %.2f' % (time.time() - start))

# Initialize the module-level TTS engine used by the HTTP endpoint below.
tts = TTService(cfg=cfg_path, model=model_path, char="char_var", speed=1.0)


@app.route('/v1/audio/speech', methods=['POST'])
def post_text_to_audio():
    """Synthesize the posted text and return it as MP3.

    Expects a JSON body of the form {"input": "<text>"}; responds with
    audio/mp3 bytes (OpenAI-style /v1/audio/speech endpoint).
    """
    import io  # stdlib; local import keeps this block self-contained

    text = request.json['input']
    audio = tts.read(text)
    # Render to in-memory buffers instead of fixed on-disk files
    # ('audio.wav'/'audio.mp3'): concurrent requests would otherwise
    # clobber each other's temporary files.
    wav_buf = io.BytesIO()
    # NOTE(review): 44100 Hz is assumed — confirm against the model config.
    soundfile.write(wav_buf, audio, 44100, format='WAV')
    wav_buf.seek(0)
    mp3_buf = io.BytesIO()
    AudioSegment.from_wav(wav_buf).export(mp3_buf, format="mp3")
    mp3_buf.seek(0)
    return send_file(mp3_buf, mimetype='audio/mp3')


if __name__ == '__main__':
    app.run(host="0.0.0.0", port=5000)
59 changes: 59 additions & 0 deletions TTS/TTService-cuda.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
import sys
import time

# Make the bundled vits package importable before pulling in its modules.
sys.path.append('TTS/vits')

import soundfile
import os
# Set before importing torch so TorchScript JIT is disabled from the start.
os.environ["PYTORCH_JIT"] = "0"
import torch

import TTS.vits.commons as commons
import TTS.vits.utils as utils

from TTS.vits.models import SynthesizerTrn
from TTS.vits.text.symbols import symbols
from TTS.vits.text import text_to_sequence

import logging
# Configure root-logger verbosity for the whole process.
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)


def get_text(text, hps):
    """Convert raw text into a LongTensor of symbol ids.

    Uses the text cleaners configured in *hps*; optionally intersperses
    blank (0) tokens when hps.data.add_blank is set.
    """
    seq = text_to_sequence(text, hps.data.text_cleaners)
    if hps.data.add_blank:
        seq = commons.intersperse(seq, 0)
    return torch.LongTensor(seq)


class TTService():
    """Text-to-speech service backed by a pre-trained VITS model on CUDA."""

    def __init__(self, cfg, model, char, speed):
        # cfg: hparams JSON path; model: checkpoint path; char: voice name
        # (logging only); speed: length_scale passed to the synthesizer.
        logging.info('Initializing TTS Service for %s...' % char)
        self.hps = utils.get_hparams_from_file(cfg)
        self.speed = speed
        self.net_g = SynthesizerTrn(
            len(symbols),
            self.hps.data.filter_length // 2 + 1,
            self.hps.train.segment_size // self.hps.data.hop_length,
            **self.hps.model).cuda()
        self.net_g.eval()
        utils.load_checkpoint(model, self.net_g, None)

    def read(self, text):
        """Synthesize *text* and return the waveform as a float32 numpy array."""
        phoneme_ids = get_text(text.replace('~', '!'), self.hps)
        with torch.no_grad():
            batch = phoneme_ids.cuda().unsqueeze(0)
            lengths = torch.LongTensor([phoneme_ids.size(0)]).cuda()
            result = self.net_g.infer(batch, lengths, noise_scale=.667,
                                      noise_scale_w=0.2, length_scale=self.speed)
            audio = result[0][0, 0].data.cpu().float().numpy()
        return audio

    def read_save(self, text, filename, sr):
        """Synthesize *text* and write it to *filename* at sample rate *sr*."""
        start = time.time()
        soundfile.write(filename, self.read(text), sr)
        logging.info('VITS Synth Done, time used %.2f' % (time.time() - start))

Loading