WIP: docker support v1.5.x trial 5
wataru committed Feb 10, 2023
1 parent 954a26b commit 3ec1902
Showing 11 changed files with 946 additions and 56 deletions.
11 changes: 10 additions & 1 deletion client/demo/dist/index.html
@@ -1 +1,10 @@
<!doctype html><html style="width:100%;height:100%;overflow:hidden"><head><meta charset="utf-8"/><title>Voice Changer Client Demo</title><script defer="defer" src="index.js"></script></head><body style="width:100%;height:100%;margin:0"><div id="app" style="width:100%;height:100%"></div></body></html>
<!DOCTYPE html>
<html style="width: 100%; height: 100%; overflow: hidden">
<head>
<meta charset="utf-8" />
<title>Voice Changer Client Demo</title>
<script defer src="index.js"></script></head>
<body style="width: 100%; height: 100%; margin: 0px">
<div id="app" style="width: 100%; height: 100%"></div>
</body>
</html>
597 changes: 595 additions & 2 deletions client/demo/dist/index.js

Large diffs are not rendered by default.

31 changes: 0 additions & 31 deletions client/demo/dist/index.js.LICENSE.txt

This file was deleted.

20 changes: 19 additions & 1 deletion client/demo/src/103_speaker_setting.tsx
@@ -94,6 +94,23 @@ export const useSpeakerSetting = (props: UseSpeakerSettingProps) => {
}, [props.clientState.clientSetting.setting.speakers, editSpeakerTargetId, editSpeakerTargetName])


const f0FactorRow = useMemo(() => {
return (
<div className="body-row split-3-2-1-4 left-padding-1 guided">
<div className="body-item-title left-padding-1">F0 Factor</div>
<div className="body-input-container">
<input type="range" className="body-item-input" min="0.1" max="5.0" step="0.1" value={props.clientState.serverSetting.setting.f0Factor} onChange={(e) => {
props.clientState.serverSetting.setF0Factor(Number(e.target.value))
}}></input>
</div>
<div className="body-item-text">
<div>{props.clientState.serverSetting.setting.f0Factor}</div>
</div>
<div className="body-item-text"></div>
</div>
)
}, [props.clientState.serverSetting.setting.f0Factor, props.clientState.serverSetting.setF0Factor])

const speakerSetting = useMemo(() => {
return (
<>
@@ -105,9 +122,10 @@ export const useSpeakerSetting = (props: UseSpeakerSettingProps) => {
{srcIdRow}
{dstIdRow}
{editSpeakerIdMappingRow}
{f0FactorRow}
</>
)
}, [srcIdRow, dstIdRow, editSpeakerIdMappingRow])
}, [srcIdRow, dstIdRow, editSpeakerIdMappingRow, f0FactorRow])

return {
speakerSetting,
22 changes: 22 additions & 0 deletions client/demo/src/css/App.css
@@ -147,6 +147,28 @@ body {
width: 40%;
}
}
.split-3-2-1-4 {
display: flex;
width: 100%;
justify-content: center;
margin: 1px 0px 1px 0px;
& > div:nth-child(1) {
left: 0px;
width: 30%;
}
& > div:nth-child(2) {
left: 30%;
width: 20%;
}
& > div:nth-child(3) {
left: 50%;
width: 10%;
}
& > div:nth-child(4) {
left: 60%;
width: 40%;
}
}
.split-3-2-2-2-1 {
display: flex;
width: 100%;
7 changes: 6 additions & 1 deletion client/lib/src/const.ts
@@ -20,6 +20,8 @@ export type VoiceChangerServerSetting = {

framework: Framework
onnxExecutionProvider: OnnxExecutionProvider,

f0Factor: number
}

export type VoiceChangerClientSetting = {
@@ -61,6 +63,7 @@ export type ServerInfo = {
dstId: number,
framework: Framework,
onnxExecutionProvider: string[]
f0Factor: number
}


@@ -120,7 +123,8 @@ export const ServerSettingKey = {
"crossFadeEndRate": "crossFadeEndRate",
"crossFadeOverlapRate": "crossFadeOverlapRate",
"framework": "framework",
"onnxExecutionProvider": "onnxExecutionProvider"
"onnxExecutionProvider": "onnxExecutionProvider",
"f0Factor": "f0Factor"
} as const
export type ServerSettingKey = typeof ServerSettingKey[keyof typeof ServerSettingKey]

@@ -136,6 +140,7 @@ export const DefaultVoiceChangerServerSetting: VoiceChangerServerSetting = {
crossFadeEndRate: 0.9,
crossFadeOverlapRate: 0.5,
framework: "ONNX",
f0Factor: 1.0,
onnxExecutionProvider: "CPUExecutionProvider"

}
14 changes: 12 additions & 2 deletions client/lib/src/hooks/useServerSetting.ts
@@ -48,6 +48,7 @@ export type ServerSettingState = {
setCrossFadeOffsetRate: (num: number) => Promise<boolean>;
setCrossFadeEndRate: (num: number) => Promise<boolean>;
setCrossFadeOverlapRate: (num: number) => Promise<boolean>;
setF0Factor: (num: number) => Promise<boolean>;
reloadServerInfo: () => Promise<void>;
setFileUploadSetting: (val: FileUploadSetting) => void
loadModel: () => Promise<void>
@@ -95,6 +96,7 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta
props.voiceChangerClient.updateServerSettings(ServerSettingKey.crossFadeOffsetRate, "" + setting.crossFadeOffsetRate)
props.voiceChangerClient.updateServerSettings(ServerSettingKey.crossFadeEndRate, "" + setting.crossFadeEndRate)
props.voiceChangerClient.updateServerSettings(ServerSettingKey.crossFadeOverlapRate, "" + setting.crossFadeOverlapRate)
props.voiceChangerClient.updateServerSettings(ServerSettingKey.f0Factor, "" + setting.f0Factor)

}, [props.voiceChangerClient])

@@ -120,7 +122,8 @@
crossFadeEndRate: res.crossFadeEndRate,
crossFadeOverlapRate: res.crossFadeOverlapRate,
framework: res.framework,
onnxExecutionProvider: (!!res.onnxExecutionProvider && res.onnxExecutionProvider.length > 0) ? res.onnxExecutionProvider[0] as OnnxExecutionProvider : DefaultVoiceChangerServerSetting.onnxExecutionProvider
onnxExecutionProvider: (!!res.onnxExecutionProvider && res.onnxExecutionProvider.length > 0) ? res.onnxExecutionProvider[0] as OnnxExecutionProvider : DefaultVoiceChangerServerSetting.onnxExecutionProvider,
f0Factor: res.f0Factor
}
_setSetting(newSetting)
setItem(INDEXEDDB_KEY_SERVER, newSetting)
@@ -191,6 +194,11 @@
}
}, [props.voiceChangerClient])

const setF0Factor = useMemo(() => {
return async (num: number) => {
return await _set_and_store(ServerSettingKey.f0Factor, "" + num)
}
}, [props.voiceChangerClient])
//////////////
// operations
/////////////
@@ -328,7 +336,8 @@
crossFadeEndRate: res.crossFadeEndRate,
crossFadeOverlapRate: res.crossFadeOverlapRate,
framework: res.framework,
onnxExecutionProvider: (!!res.onnxExecutionProvider && res.onnxExecutionProvider.length > 0) ? res.onnxExecutionProvider[0] as OnnxExecutionProvider : DefaultVoiceChangerServerSetting.onnxExecutionProvider
onnxExecutionProvider: (!!res.onnxExecutionProvider && res.onnxExecutionProvider.length > 0) ? res.onnxExecutionProvider[0] as OnnxExecutionProvider : DefaultVoiceChangerServerSetting.onnxExecutionProvider,
f0Factor: res.f0Factor
})
}
}, [props.voiceChangerClient])
@@ -354,6 +363,7 @@
setCrossFadeOffsetRate,
setCrossFadeEndRate,
setCrossFadeOverlapRate,
setF0Factor,
reloadServerInfo,
setFileUploadSetting,
loadModel,
2 changes: 1 addition & 1 deletion docker/Dockerfile
@@ -23,7 +23,7 @@ RUN cd MMVC_Client && git checkout 04f3fec4fd82dea6657026ec4e1cd80fb29a415c && c
WORKDIR /
ADD dummy /

RUN git clone --depth 1 https://github.com/w-okada/voice-changer.git
RUN git clone --depth 1 https://github.com/w-okada/voice-changer.git


#########
6 changes: 5 additions & 1 deletion server/README.md
@@ -8,7 +8,11 @@ $ conda activate mmvc-server
$ pip install -r requirements.txt
$ git clone https://github.com/isletennos/MMVC_Client.git
$ cd MMVC_Client && git checkout 04f3fec4fd82dea6657026ec4e1cd80fb29a415c && cd -
$ cd MMVC_Client && git checkout 3374a1177b73e3f6d600e5dbe93af033c36ee120 && cd -
$ git clone https://github.com/isletennos/MMVC_Trainer.git
$ cd MMVC_Trainer && git checkout c242d3d1cf7f768af70d9735082ca2bdd90c45f3 && cd -
$ python3 MMVCServerSIO.py -p 18888 --https true
```

84 changes: 68 additions & 16 deletions server/voice_changer/VoiceChanger.py
@@ -10,7 +10,12 @@
from symbols import symbols
from models import SynthesizerTrn

from voice_changer.TrainerFunctions import TextAudioSpeakerCollate, spectrogram_torch, load_checkpoint, get_hparams_from_file
import pyworld as pw

# from voice_changer.TrainerFunctions import TextAudioSpeakerCollate, spectrogram_torch, load_checkpoint, get_hparams_from_file

from voice_changer.client_modules import convert_continuos_f0, spectrogram_torch, TextAudioSpeakerCollate, get_hparams_from_file, load_checkpoint


providers = ['OpenVINOExecutionProvider', "CUDAExecutionProvider", "DmlExecutionProvider", "CPUExecutionProvider"]

@@ -26,12 +31,15 @@ class VocieChangerSettings():
convertChunkNum: int = 32
minConvertSize: int = 0
framework: str = "ONNX" # PyTorch or ONNX
f0Factor: float = 1.0

pyTorchModelFile: str = ""
onnxModelFile: str = ""
configFile: str = ""

# ↓ list only the mutable fields
intData = ["gpu", "srcId", "dstId", "convertChunkNum", "minConvertSize"]
floatData = ["crossFadeOffsetRate", "crossFadeEndRate", "crossFadeOverlapRate"]
floatData = ["crossFadeOffsetRate", "crossFadeEndRate", "crossFadeOverlapRate", "f0Factor"]
strData = ["framework"]


@@ -66,11 +74,23 @@ def loadModel(self, config: str, pyTorch_model_file: str = None, onnx_model_file
# generate the PyTorch model
if pyTorch_model_file != None:
self.net_g = SynthesizerTrn(
len(symbols),
self.hps.data.filter_length // 2 + 1,
self.hps.train.segment_size // self.hps.data.hop_length,
spec_channels=self.hps.data.filter_length // 2 + 1,
segment_size=self.hps.train.segment_size // self.hps.data.hop_length,
inter_channels=self.hps.model.inter_channels,
hidden_channels=self.hps.model.hidden_channels,
upsample_rates=self.hps.model.upsample_rates,
upsample_initial_channel=self.hps.model.upsample_initial_channel,
upsample_kernel_sizes=self.hps.model.upsample_kernel_sizes,
n_flow=self.hps.model.n_flow,
dec_out_channels=1,
dec_kernel_size=7,
n_speakers=self.hps.data.n_speakers,
**self.hps.model)
gin_channels=self.hps.model.gin_channels,
requires_grad_pe=self.hps.requires_grad.pe,
requires_grad_flow=self.hps.requires_grad.flow,
requires_grad_text_enc=self.hps.requires_grad.text_enc,
requires_grad_dec=self.hps.requires_grad.dec
)
self.net_g.eval()
load_checkpoint(pyTorch_model_file, self.net_g, None)
# utils.load_checkpoint(pyTorch_model_file, self.net_g, None)
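
The constructor now spells out every kwarg instead of splatting **self.hps.model, apparently because the v1.5.x SynthesizerTrn takes arguments (decoder output shape, requires_grad flags) that do not all live under hps.model. A sketch of the hparams access pattern the call assumes; the key names come from the diff, everything else is an assumption:

```python
# get_hparams_from_file wraps the JSON config so nested keys read as attributes.
from voice_changer.client_modules import get_hparams_from_file

hps = get_hparams_from_file("config.json")  # path is illustrative
spec_channels = hps.data.filter_length // 2 + 1
segment_size = hps.train.segment_size // hps.data.hop_length  # in frames
print(hps.requires_grad.pe, hps.requires_grad.flow)  # new v1.5.x config section
```
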
@@ -174,14 +194,31 @@ def _generate_input(self, unpackedData: any, convertSize: int):
audio_norm = self.audio_buffer[:, -convertSize:] # extract only the segment to be converted
self.audio_buffer = audio_norm

# TBD: the data shuttles back and forth between numpy and PyTorch; for now, getting it working takes priority.
audio_norm_np = audio_norm.squeeze().numpy().astype(np.double)
_f0, _time = pw.dio(audio_norm_np, self.hps.data.sampling_rate, frame_period=5.5)
f0 = pw.stonemask(audio_norm_np, _f0, _time, self.hps.data.sampling_rate)
f0 = convert_continuos_f0(f0, int(audio_norm_np.shape[0] / self.hps.data.hop_length))
f0 = torch.from_numpy(f0.astype(np.float32))

spec = spectrogram_torch(audio_norm, self.hps.data.filter_length,
self.hps.data.sampling_rate, self.hps.data.hop_length, self.hps.data.win_length,
center=False)
# dispose_stft_specs = 2
# spec = spec[:, dispose_stft_specs:-dispose_stft_specs]
# f0 = f0[dispose_stft_specs:-dispose_stft_specs]
spec = torch.squeeze(spec, 0)
sid = torch.LongTensor([int(self.settings.srcId)])

data = (self.text_norm, spec, audio_norm, sid)
data = TextAudioSpeakerCollate()([data])
# data = (self.text_norm, spec, audio_norm, sid)
# data = TextAudioSpeakerCollate()([data])
data = TextAudioSpeakerCollate(
sample_rate=self.hps.data.sampling_rate,
hop_size=self.hps.data.hop_length,
f0_factor=self.settings.f0Factor # TBD: parameter
# f0_factor=2.4 # TBD: parameter
)([(spec, sid, f0)])

return data
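
f0Factor reaches the collate as a multiplicative pitch shift: scaling a continuous F0 track by k moves the pitch by log2(k) octaves, so 2.0 is one octave up, and 220 Hz × 1.5 = 330 Hz is a fifth up. A minimal sketch of that scaling, assuming the collate applies a plain multiply — only the f0_factor parameter itself is visible in this diff:

```python
import numpy as np

def shift_f0(f0: np.ndarray, f0_factor: float) -> np.ndarray:
    # Multiplying preserves the pitch contour; the shift is log2(f0_factor)
    # octaves, so f0_factor=1.0 (the default) leaves the voice untouched.
    return f0 * f0_factor
```
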

def _onnx_inference(self, data, inputSize):
@@ -224,10 +261,15 @@ def _pyTorch_inference(self, data, inputSize):

if self.settings.gpu < 0 or self.gpu_num == 0:
with torch.no_grad():
x, x_lengths, spec, spec_lengths, y, y_lengths, sid_src = [x.cpu() for x in data]
sid_tgt1 = torch.LongTensor([self.settings.dstId]).cpu()
audio1 = (self.net_g.cpu().voice_conversion(spec, spec_lengths, sid_src=sid_src,
sid_tgt=sid_tgt1)[0, 0].data * self.hps.data.max_wav_value)
spec, spec_lengths, sid_src, sin, d = data
spec = spec.cpu()
spec_lengths = spec_lengths.cpu()
sid_src = sid_src.cpu()
sin = sin.cpu()
d = tuple([d[:1].cpu() for d in d])
sid_target = torch.LongTensor([self.settings.dstId]).cpu()

audio1 = self.net_g.cpu().voice_conversion(spec, spec_lengths, sin, d, sid_src, sid_target)[0, 0].data * self.hps.data.max_wav_value

if self.prev_strength.device != torch.device('cpu'):
print(f"prev_strength move from {self.prev_strength.device} to cpu")
@@ -256,11 +298,21 @@

else:
with torch.no_grad():
x, x_lengths, spec, spec_lengths, y, y_lengths, sid_src = [x.cuda(self.settings.gpu) for x in data]
sid_tgt1 = torch.LongTensor([self.settings.dstId]).cuda(self.settings.gpu)
audio1 = self.net_g.cuda(self.settings.gpu).voice_conversion(spec, spec_lengths, sid_src=sid_src,
sid_tgt=sid_tgt1)[0, 0].data * self.hps.data.max_wav_value
spec, spec_lengths, sid_src, sin, d = data
spec = spec.cuda(self.settings.gpu)
spec_lengths = spec_lengths.cuda(self.settings.gpu)
sid_src = sid_src.cuda(self.settings.gpu)
sin = sin.cuda(self.settings.gpu)
d = tuple([d[:1].cuda(self.settings.gpu) for d in d])
sid_target = torch.LongTensor([self.settings.dstId]).cuda(self.settings.gpu)

# audio1 = self.net_g.cuda(self.settings.gpu).voice_conversion(spec, spec_lengths, sid_src=sid_src,
# sid_tgt=sid_tgt1)[0, 0].data * self.hps.data.max_wav_value

audio1 = self.net_g.cuda(self.settings.gpu).voice_conversion(spec, spec_lengths, sin, d,
sid_src, sid_target)[0, 0].data * self.hps.data.max_wav_value

# audio1 = audio1[10:-10]
if self.prev_strength.device != torch.device('cuda', self.settings.gpu):
print(f"prev_strength move from {self.prev_strength.device} to gpu{self.settings.gpu}")
self.prev_strength = self.prev_strength.cuda(self.settings.gpu)
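
Both device branches above follow the same shape: unpack the five-element collate output, move every tensor to one target device, and call the new six-argument voice_conversion. A condensed sketch of that dispatch; run_voice_conversion is an illustrative helper, not part of the diff:

```python
import torch

def run_voice_conversion(net_g, data, dst_id: int, gpu: int):
    dev = torch.device("cpu") if gpu < 0 else torch.device(f"cuda:{gpu}")
    spec, spec_lengths, sid_src, sin, d = data
    spec, spec_lengths, sid_src, sin = (t.to(dev) for t in (spec, spec_lengths, sid_src, sin))
    d = tuple(t[:1].to(dev) for t in d)  # mirrors the d[:1] slicing in the diff
    sid_tgt = torch.LongTensor([dst_id]).to(dev)
    with torch.no_grad():
        return net_g.to(dev).voice_conversion(spec, spec_lengths, sin, d, sid_src, sid_tgt)[0, 0].data
```
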