|
- import hydra
- from svoice.models.swave import SWave
- from mindspore import Model
- from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
- # from svoice.data.data_test2_5_5 import DatasetGenerator
- from data import DatasetGenerator
- from mindspore import save_checkpoint, set_seed, load_checkpoint, load_param_into_net
- import mindspore.dataset as ds
- from mindspore import nn
- # from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
- # from svoice.network_define import WithLossCell
- # from svoice.models.Loss_final1 import myloss
- from generatorloss import Generatorloss
- from trainonestep import TrainOneStep
- from svoice.network_define import WithLossCell
- from svoice.models.Loss_final1 import myloss
- import time
- import os
- import zipfile
- import argparse
- import json
- import socket
- import librosa
- import moxing as mox
- from mindspore import context
- from mindspore.context import ParallelMode
- from mindspore.communication.management import init, get_rank, get_group_size
-
parser = argparse.ArgumentParser("WSJ0 data preprocessing")
parser.add_argument('--in-dir', type=str, default=r"/home/work/user-job-dir/inputs/data/",
                    help='Directory path of wsj0 including tr, cv and tt')
parser.add_argument('--out-dir', type=str, default=r"/home/work/user-job-dir/inputs/data_json",
                    help='Directory path to put output files')
parser.add_argument('--sample-rate', type=int, default=8000,
                    help='Sample rate of audio file')
parser.add_argument('--data_url',
                    help='path to training/inference dataset folder',
                    default='/home/work/user-job-dir/inputs/data/')
parser.add_argument('--train_url',
                    help='model folder to save/load',
                    default='/home/work/user-job-dir/model/')
parser.add_argument(
    '--device_target',
    type=str,
    default="Ascend",
    choices=['Ascend', 'GPU', 'CPU'],
    help='device where the code will be implemented (default: Ascend)')
parser.add_argument('--segment', type=int, default=4,
                    help='Length in seconds of each training segment')
parser.add_argument('--batch_size', type=int, default=4,
                    help='Mini-batch size for training')
parser.add_argument('--epochs', type=int, default=100,
                    help='Number of training epochs')
parser.add_argument('--device_num', type=int, default=2,
                    help='Number of devices for distributed training')
parser.add_argument('--device_id', type=int, default=0,
                    help='Device id for single-device training')
# NOTE(review): type=bool is an argparse pitfall — any non-empty string
# (including "False") parses as True. Kept as-is to preserve behavior.
parser.add_argument('--run_distribute', type=bool, default=True,
                    help='Whether to run distributed (data-parallel) training')
parser.add_argument('--data_batch_size', type=int, default=3,
                    help='Batch size used by the dataset generator')
parser.add_argument('--train', type=str, default='/home/work/user-job-dir/inputs/data_json/tr',
                    help='Directory of the training json manifests')
parser.add_argument('--valid', type=str, default="/home/work/user-job-dir/inputs/data_json/tr",
                    help='Directory of the validation json manifests')
parser.add_argument('--test', type=str, default="/home/work/user-job-dir/inputs/data_json/tr",
                    help='Directory of the test json manifests')
parser.add_argument('--lr', type=float, default=5e-6,
                    help='Initial learning rate')
parser.add_argument('--beta2', type=float, default=0.999,
                    help='Adam beta2 coefficient')
parser.add_argument('--snapshots', type=int, default=1, help='Snapshots')
parser.add_argument('--prefix', default='tpami_residual_filter8', help='Location to save checkpoint models')
parser.add_argument('--model_type', type=str, default='swave')
# BUG FIX: default was the string '5'; argparse applies `type` only to
# command-line strings, not to defaults, so the str leaked into
# CheckpointConfig(keep_checkpoint_max=...). Use the integer 5.
parser.add_argument('--keep_checkpoint_max', type=int, default=5,
                    help='Maximum number of checkpoint files to keep')
-
-
def preprocess_one_dir(in_dir, out_dir, out_filename, sample_rate=8000):
    """Scan *in_dir* for .wav files and write a ``<out_filename>.json`` manifest.

    The manifest is a list of ``[wav_path, num_samples]`` pairs, where the
    sample count comes from loading each file at *sample_rate*.

    Args:
        in_dir: directory containing the wav files (non-.wav entries skipped).
        out_dir: output directory, created if it does not exist.
        out_filename: basename of the json file (without extension).
        sample_rate: sample rate passed to librosa when loading (default 8000).
    """
    file_infos = []
    in_dir = os.path.abspath(in_dir)
    # Sort for a deterministic manifest order — os.listdir order is arbitrary.
    for wav_file in sorted(os.listdir(in_dir)):
        if not wav_file.endswith('.wav'):
            continue
        wav_path = os.path.join(in_dir, wav_file)
        samples, _ = librosa.load(wav_path, sr=sample_rate)
        file_infos.append((wav_path, len(samples)))
    # exist_ok avoids the check-then-create race of the original exists()/makedirs().
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, out_filename + '.json'), 'w') as f:
        json.dump(file_infos, f, indent=4)
-
def preprocess(args):
    """Build json manifests for every (split, speaker) wav directory.

    For each data split (currently only 'tr') and each of the mixture and
    source sub-directories, delegates to ``preprocess_one_dir``, which writes
    ``<speaker>.json`` under ``args.out_dir/<split>``.
    """
    splits = ['tr']
    speakers = ['mix', 's1', 's2']
    for split in splits:
        split_out = os.path.join(args.out_dir, split)
        for spk in speakers:
            src_dir = os.path.join(args.in_dir, split, spk)
            preprocess_one_dir(src_dir, split_out, spk,
                               sample_rate=args.sample_rate)
-
-
# @hydra.main(config_path="conf", config_name='config.yaml')
def main(args):
    """Train the SWave separation model on ModelArts / Ascend.

    Steps: configure single- or multi-device context, download the dataset
    from OBS, build the json manifests, train with MindSpore's Model API,
    then upload the local checkpoints back to OBS.

    Args:
        args: parsed command-line namespace (see the module-level parser).
    """
    if args.run_distribute:
        print("distribute")
        # DEVICE_ID is injected by the distributed launcher; default to "0"
        # instead of crashing on int(None) when it is missing.
        device_id = int(os.getenv("DEVICE_ID", "0"))
        device_num = args.device_num
        context.set_context(device_id=device_id)
        init()
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
                                          device_num=device_num)
        rank_id = get_rank()          # rank of this device within the cluster
        rank_size = get_group_size()  # total number of devices in the cluster
    else:
        device_id = args.device_id
        context.set_context(device_id=device_id)
        # BUG FIX: rank_id/rank_size were left undefined on this path but are
        # used below for dataset sharding -> NameError. A single device is
        # one shard of one.
        rank_id = 0
        rank_size = 1

    home = os.path.dirname(os.path.realpath(__file__))
    obs_data_url = args.data_url
    args.data_url = '/home/work/user-job-dir/inputs/data/'

    # Local directory where checkpoints are written before the OBS upload.
    train_dir = os.path.join(home, 'checkpoints')
    args.save_folder = train_dir
    obs_train_url = args.train_url
    if not os.path.exists(train_dir):
        os.mkdir(train_dir)

    # Copy the dataset from OBS into the training environment (best effort:
    # a failure is logged and training proceeds against whatever is local).
    try:
        mox.file.copy_parallel(obs_data_url, args.data_url)
        print("Successfully Download {} to {}".format(obs_data_url,
                                                      args.data_url))
    except Exception as e:
        print('moxing download {} to {} failed: '.format(
            obs_data_url, args.data_url) + str(e))

    # Fixed SWave hyper-parameters — presumably N/L/H/R/C are the encoder and
    # separator sizes; see svoice.models.swave for their meaning (TODO confirm).
    kwargs = {'N': 128, 'L': 8, 'H': 128, 'R': 6, 'C': 2, 'input_normalize': False, 'sr': 8000, 'segment': 4}
    net = SWave(**kwargs)

    print("开始prepro-------------")
    preprocess(args)
    print("finishPrepro------------")

    tr_dataset = DatasetGenerator(args.train, args.data_batch_size,
                                  sample_rate=args.sample_rate, segment=args.segment)
    # Shard the dataset across devices; with a single device this degenerates
    # to one shard (rank_size=1, rank_id=0).
    tr_loader = ds.GeneratorDataset(tr_dataset, ["mixture", "lens", "sources"], shuffle=True,
                                    num_shards=rank_size, shard_id=rank_id)
    tr_loader = tr_loader.batch(args.batch_size)
    print("dataloaderEND------------------------------")
    net = net.set_train()
    optimizier = nn.Adam(net.trainable_params(), learning_rate=args.lr, beta1=0.9, beta2=args.beta2, loss_scale=0.1)
    time_cb = TimeMonitor()
    my_loss = myloss()
    loss_cb = LossMonitor()
    cb = [time_cb, loss_cb]
    net_with_loss = WithLossCell(net, my_loss)
    model = Model(net_with_loss, optimizer=optimizier)

    # int(...) guards against keep_checkpoint_max arriving as a string
    # (the parser's old default was '5').
    config_ck = CheckpointConfig(save_checkpoint_steps=5,
                                 keep_checkpoint_max=int(args.keep_checkpoint_max))
    ckpt_cb = ModelCheckpoint(prefix="gdprnn", directory=args.save_folder, config=config_ck)
    cb += [ckpt_cb]
    print("----------------------start training-----------------")
    model.train(epoch=args.epochs, train_dataset=tr_loader, callbacks=cb, dataset_sink_mode=False)

    # Copy the trained checkpoints back to OBS so they can be downloaded from
    # the platform's training-task page (best effort, like the download above).
    try:
        mox.file.copy_parallel(train_dir, obs_train_url)
        print("Successfully Upload {} to {}".format(train_dir,
                                                    obs_train_url))
    except Exception as e:
        print('moxing upload {} to {} failed: '.format(train_dir,
                                                       obs_train_url) + str(e))
-
-
if __name__ == '__main__':
    # Script entry point: parse CLI args, put MindSpore into graph mode on
    # the requested backend (Ascend/GPU/CPU), then run training.
    args = parser.parse_args()
    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
    print("---------------cont------------")
    main(args)
|