|
-
# import hydra
import os
# NOTE(review): this LD_PRELOAD value looks garbled — ".sosetup.py.so.1" reads
# like two filenames fused together; confirm the intended shared-library path.
os.environ["LD_PRELOAD"] = "_check_build.cpython-37m-aarch64-linux-gnu.sosetup.py.so.1"
from svoice.models.swave import SWave
from mindspore import Model
from svoice.data.data_test2_5_5 import DatasetGenerator
from mindspore import save_checkpoint, set_seed, load_checkpoint, load_param_into_net
import mindspore.dataset as ds
from mindspore import nn
# from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
# from svoice.network_define import WithLossCell
# from svoice.models.Loss_final1 import myloss
from generatorloss import Generatorloss
from trainonestep import TrainOneStep
from svoice.network_define import WithLossCell
from svoice.models.Loss_final1 import myloss
import time
import zipfile
import argparse
import json
import librosa
import moxing as mox
from mindspore import context
from mindspore.context import ParallelMode
from mindspore.communication.management import init, get_rank, get_group_size
def _str2bool(value):
    """Parse a boolean CLI value.

    argparse's ``type=bool`` treats any non-empty string as True (so
    ``--run_distribute False`` would yield True); this parser handles the
    usual textual spellings explicitly.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got {!r}'.format(value))


parser = argparse.ArgumentParser("WSJ0 data preprocessing")
parser.add_argument('--zip-in-dir', type=str, default=r"/home/work/user-job-dir/inputs/data/tr.zip",
                    help='Path of the zipped wsj0 archive (tr/cv/tt)')
parser.add_argument('--zip-out-dir', type=str, default=r"/home/work/user-job-dir/inputs/data/",
                    help='Directory the zip archive is extracted into')
parser.add_argument('--in-dir', type=str, default=r"/home/work/user-job-dir/inputs/data/",
                    help='Directory path of wsj0 including tr, cv and tt')
parser.add_argument('--out-dir', type=str, default=r"/home/work/user-job-dir/inputs/data_json",
                    help='Directory path to put output files')
parser.add_argument('--sample-rate', type=int, default=8000,
                    help='Sample rate of audio file')
parser.add_argument('--data_url',
                    help='path to training/inference dataset folder',
                    default='/home/work/user-job-dir/inputs/data/')
parser.add_argument(
    '--device_target',
    type=str,
    default="Ascend",
    choices=['Ascend', 'GPU', 'CPU'],
    help='device where the code will be implemented (default: Ascend)')
parser.add_argument('--train_url',
                    help='model folder to save/load',
                    default='/home/work/user-job-dir/outputs/model')
parser.add_argument('--segment', type=int, default=4,
                    help='Segment length in seconds for each training sample')
parser.add_argument('--batch_size', type=int, default=8,
                    help='Number of samples per training batch')
parser.add_argument('--epochs', type=int, default=100,
                    help='Number of training epochs')
parser.add_argument('--device_num', type=int, default=2,
                    help='Number of devices used for distributed training')
parser.add_argument('--device_id', type=int, default=0,
                    help='Device id used when running on a single device')
parser.add_argument('--run_distribute', type=_str2bool, default=True,
                    help='Whether to run distributed data-parallel training')
parser.add_argument('--data_batch_size', type=int, default=3,
                    help='Per-item batch size used by the dataset generator')
parser.add_argument('--train', type=str, default='/home/work/user-job-dir/inputs/data_json/tr',
                    help='Directory of the training json manifests')
parser.add_argument('--valid', type=str, default="/home/work/user-job-dir/inputs/data_json/tr",
                    help='Directory of the validation json manifests')
parser.add_argument('--test', type=str, default="/home/work/user-job-dir/inputs/data_json/tr",
                    help='Directory of the test json manifests')
parser.add_argument('--lr', type=float, default=5e-6,
                    help='Learning rate for the Adam optimizer')
parser.add_argument('--beta2', type=float, default=0.999,
                    help='beta2 coefficient for the Adam optimizer')
def train(trainoneStep, data, args):
    """Run the training loop for ``args.epochs`` epochs.

    Args:
        trainoneStep: TrainOneStep cell wrapping the loss network + optimizer;
            called per batch and returns the scalar loss tensor.
        data: dict with keys 'tr_loader', 'cv_loader', 'tt_loader'
            (only the train loader is consumed here).
        args: parsed CLI namespace; reads ``epochs`` and rewrites ``train_url``.

    Side effects: writes checkpoints under ``args.train_url`` and uploads the
    directory with moxing after every epoch.
    """
    trainoneStep.set_train()
    trainoneStep.set_grad()
    tr_loader = data['tr_loader']
    cv_loader = data['cv_loader']  # currently unused; kept for future validation
    tt_loader = data['tt_loader']  # currently unused; kept for future testing
    step = tr_loader.get_dataset_size()

    # NOTE(review): '.model/' (no slash after the dot) looks like a typo for
    # './model/', but the upload below copies '.model/' -> './model/' and may
    # depend on the two paths being distinct — confirm before changing.
    args.train_url = '.model/'
    train_url = args.train_url
    obs_train_url = './model/'
    if not os.path.exists(train_url):
        os.makedirs(train_url)

    for epoch in range(args.epochs):

        total_loss = 0
        batch_idx = 0
        # `batch` (not `data`) so the function parameter isn't shadowed.
        for batch in tr_loader:
            # `lens` (not `len`) so the builtin isn't shadowed.
            mixture, lens, source = [x for x in batch]
            t0 = time.time()
            loss = trainoneStep(mixture, lens, source)
            t1 = time.time()

            print("epoch[{}]({}/{}),loss:{:.4f},stepTime:{}".format(epoch + 1, batch_idx + 1, step, loss.asnumpy(), t1 - t0))

            # Mid-epoch checkpoint every 1001 batches (fires on the first batch too).
            if batch_idx % 1001 == 0:
                save_ckpt = os.path.join(args.train_url, 'half{}_{}_gdprnn.ckpt'.format(epoch + 1, batch_idx))
                save_checkpoint(trainoneStep.network, save_ckpt)
            batch_idx = batch_idx + 1
            total_loss += loss
        train_loss = total_loss / batch_idx
        print("epoch[{}]:trainAvgLoss:{:.4f}".format(epoch + 1, train_loss.asnumpy()))

        # End-of-epoch checkpoint.
        save_ckpt = os.path.join(args.train_url, '{}_gdprnn.ckpt'.format(epoch + 1))
        save_checkpoint(trainoneStep.network, save_ckpt)

        try:
            mox.file.copy_parallel(train_url, obs_train_url)
            print("Successfully Upload {} to {}".format(train_url,
                                                        obs_train_url))
        except Exception as e:
            print('moxing upload {} to {} failed: '.format(train_url,
                                                           obs_train_url) + str(e))
-
-
-
def preprocess_one_dir(in_dir, out_dir, out_filename, sample_rate=8000):
    """Index the wav files of one directory into a json manifest.

    Reads every ``.wav`` under ``in_dir`` and writes a list of
    ``(absolute_path, num_samples)`` pairs to ``out_dir/out_filename.json``.

    Args:
        in_dir: directory containing the wav files.
        out_dir: output directory for the json manifest (created if missing).
        out_filename: json file stem, without the '.json' extension.
        sample_rate: resampling rate passed to librosa.load (default 8000).
    """
    file_infos = []
    in_dir = os.path.abspath(in_dir)
    wav_list = os.listdir(in_dir)
    for wav_file in wav_list:
        if not wav_file.endswith('.wav'):
            continue
        wav_path = os.path.join(in_dir, wav_file)
        samples, _ = librosa.load(wav_path, sr=sample_rate)
        file_infos.append((wav_path, len(samples)))
    # exist_ok avoids the check-then-create race that crashes when several
    # distributed workers (run_distribute with device_num > 1) preprocess the
    # same directory concurrently.
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, out_filename + '.json'), 'w') as f:
        json.dump(file_infos, f, indent=4)
-
def preprocess(args):
    """Build json manifests for every (split, speaker) wav directory.

    Currently only the 'tr' split is indexed; each of its 'mix', 's1' and
    's2' sub-directories gets its own manifest under ``args.out_dir``.
    """
    splits = ['tr']
    speakers = ['mix', 's1', 's2']
    for split in splits:
        src_root = os.path.join(args.in_dir, split)
        dst_root = os.path.join(args.out_dir, split)
        for speaker in speakers:
            preprocess_one_dir(os.path.join(src_root, speaker),
                               dst_root,
                               speaker,
                               sample_rate=args.sample_rate)
-
-
- #@hydra.main(config_path="conf", config_name='config.yaml')
def main(args):
    """Entry point: configure the (optionally distributed) device context,
    download the dataset with moxing, preprocess it into json manifests,
    build the data loaders, and train the SWave separation network.

    Args:
        args: parsed CLI namespace from the module-level parser.
    """
    if args.run_distribute:
        print("distribute")
        device_id = int(os.getenv("DEVICE_ID", '0'))
        device_num = args.device_num
        context.set_context(device_id=device_id)
        init()
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
                                          device_num=device_num)
        rank = get_rank()  # rank id; currently unused but init side effects matter
    else:
        device_id = args.device_id
        context.set_context(device_id=device_id)

    print("--------------------MAIN----------------------------")

    # Copy the dataset from OBS storage into the local training environment;
    # args.data_url/train_url are rebased onto local job paths.
    obs_data_url = args.data_url
    args.data_url = '/home/work/user-job-dir/inputs/data/'
    args.train_url = './model/'

    try:
        mox.file.copy_parallel(obs_data_url, args.data_url)
        print("Successfully Download {} to {}".format(obs_data_url,
                                                      args.data_url))
    except Exception as e:
        print('moxing download {} to {} failed: '.format(
            obs_data_url, args.data_url) + str(e))

    # SWave hyper-parameters: N filters, L kernel, H hidden, R blocks,
    # C speakers, at 8 kHz with 4-second segments.
    kwargs = {'N': 128, 'L': 8, 'H': 128, 'R': 6, 'C': 2, 'input_normalize': False, 'sr': 8000, 'segment': 4}
    net = SWave(**kwargs)

    # Index the downloaded wavs into json manifests before building loaders.
    print("开始prepro-------------")
    preprocess(args)

    # Train / validation / test loaders; with the CLI defaults all three
    # currently point at the same 'tr' manifest directory.
    tr_dataset = DatasetGenerator(args.train, args.data_batch_size,
                                  sample_rate=args.sample_rate, segment=args.segment)
    tr_loader = ds.GeneratorDataset(tr_dataset, ["mixture", "lens", "sources"], shuffle=False)
    tr_loader = tr_loader.batch(args.batch_size)

    cv_dataset = DatasetGenerator(args.valid, args.data_batch_size,
                                  sample_rate=args.sample_rate, segment=args.segment)
    cv_loader = ds.GeneratorDataset(cv_dataset, ["mixture", "lens", "sources"], shuffle=False)
    cv_loader = cv_loader.batch(args.batch_size)

    tt_dataset = DatasetGenerator(args.test, args.data_batch_size,
                                  sample_rate=args.sample_rate, segment=args.segment)
    tt_loader = ds.GeneratorDataset(tt_dataset, ["mixture", "lens", "sources"], shuffle=False)
    tt_loader = tt_loader.batch(args.batch_size)

    data = {"tr_loader": tr_loader,
            "cv_loader": cv_loader, "tt_loader": tt_loader}

    # Loss cell wrapping the network.
    loss_network = Generatorloss(net)

    # Adam optimizer over all trainable parameters.
    optimizier = nn.Adam(net.trainable_params(), learning_rate=args.lr, beta1=0.9, beta2=args.beta2)

    # One training step = forward + backward + parameter update.
    trainonestepNet = TrainOneStep(loss_network, optimizier, sens=1.0)

    train(trainonestepNet, data, args)
-
-
-
-
-
-
-
-
if __name__ == '__main__':
    # Parse the CLI, pin the execution context, then hand off to main().
    cli_args = parser.parse_args()
    print("_________________context")
    context.set_context(mode=context.GRAPH_MODE, device_target=cli_args.device_target)
    print("---------------cont------------")
    main(cli_args)
|