Medical Text Classification
Last updated 2020/05/12 15:53 · 1403 reads
Tags: PyTorch · BERT · NLP · Text Classification
By 玖月初识
The project presented at the defense used a batch size of 32 and was trained for 10 epochs in the online environment; the best model reached an accuracy score of 82.50.
def pred_process(title, text, tokenizer, pad_size):
    content = title + text
    content = data_clean(content)  # data_clean: the author's text-cleaning helper (not shown)
    tokens = tokenizer.tokenize(content)
    tokens = ["[CLS]"] + tokens + ["[SEP]"]
    # Build input_id, seg_id and att_mask
    input_id = tokenizer.convert_tokens_to_ids(tokens)
    types = [0] * len(input_id)
    masks = [1] * len(input_id)
    # Pad short sequences, truncate long ones
    if len(input_id) < pad_size:
        types = types + [1] * (pad_size - len(input_id))  # segment id set to 1 for the padded part
        masks = masks + [0] * (pad_size - len(input_id))  # attention mask 0 for the padded part
        input_id = input_id + [0] * (pad_size - len(input_id))
    else:
        types = types[:pad_size]
        masks = masks[:pad_size]
        input_id = input_id[:pad_size]
    return input_id, types, masks
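A quick usage sketch, assuming tokenizer has been created from the RoBERTa vocabulary (see the model-loading section below) and that the author's data_clean helper is importable; the sample strings are made up:

input_id, types, masks = pred_process('胸痛', '患者自述胸痛三天,加重一天。', tokenizer, pad_size=256)
# All three sequences come back padded/truncated to exactly pad_size
assert len(input_id) == len(types) == len(masks) == 256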
def split_train_dev_data(self):
    # Shuffle the indices with a fixed seed for reproducibility
    random_order = list(range(len(self.input_ids)))
    np.random.seed(2020)
    np.random.shuffle(random_order)
    print(random_order[:10])
    # 4:1 split into training and dev sets
    split = int(len(self.input_ids) * 0.8)
    train_idx, dev_idx = random_order[:split], random_order[split:]
    self.input_ids_train = np.array([self.input_ids[i] for i in train_idx])
    self.input_types_train = np.array([self.input_types[i] for i in train_idx])
    self.input_masks_train = np.array([self.input_masks[i] for i in train_idx])
    self.y_train = np.array([self.labels[i] for i in train_idx])
    print(self.input_ids_train.shape, self.input_types_train.shape,
          self.input_masks_train.shape, self.y_train.shape)
    self.input_ids_dev = np.array([self.input_ids[i] for i in dev_idx])
    self.input_types_dev = np.array([self.input_types[i] for i in dev_idx])
    self.input_masks_dev = np.array([self.input_masks[i] for i in dev_idx])
    self.y_dev = np.array([self.labels[i] for i in dev_idx])
    print(self.input_ids_dev.shape, self.input_types_dev.shape,
          self.input_masks_dev.shape, self.y_dev.shape)
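To feed these splits into training and evaluation, they still need to be wrapped into loaders. A minimal sketch, assuming a hypothetical build_loaders helper on the same class; the tuple order matches what test() below unpacks:

import torch
from torch.utils.data import TensorDataset, DataLoader

def build_loaders(self, batch_size=32):
    # Hypothetical helper: batch the split arrays as (ids, types, masks, label)
    train_data = TensorDataset(torch.LongTensor(self.input_ids_train),
                               torch.LongTensor(self.input_types_train),
                               torch.LongTensor(self.input_masks_train),
                               torch.LongTensor(self.y_train))
    dev_data = TensorDataset(torch.LongTensor(self.input_ids_dev),
                             torch.LongTensor(self.input_types_dev),
                             torch.LongTensor(self.input_masks_dev),
                             torch.LongTensor(self.y_dev))
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
    dev_loader = DataLoader(dev_data, batch_size=batch_size, shuffle=False)
    return train_loader, dev_loader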
The pretrained language model used is chinese_roberta_wwm_ext_pytorch from the platform's model library.
# Path to the training data
DATA_PATH = os.path.join(sys.path[0], 'data', 'input')
# Download the pretrained model via the platform helper
path = remote_helper.get_remote_data(
    'https://www.flyai.com/m/zh_roberta_wwm_ext_pytorch.zip')
# Path of the extracted model
BERT_PATH = os.path.join(DATA_PATH, 'model', 'chinese_roberta_wwm_ext_pytorch')
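Once the archive is extracted, the tokenizer and model can be instantiated from BERT_PATH. A sketch, assuming the directory follows the pytorch_pretrained_bert layout (vocab.txt, bert_config.json, pytorch_model.bin):

from pytorch_pretrained_bert import BertTokenizer

tokenizer = BertTokenizer.from_pretrained(BERT_PATH)
# BertForSequenceClassification here is the subclass defined just below;
# its classifier head is freshly initialized, only the encoder weights are loaded
model = BertForSequenceClassification.from_pretrained(BERT_PATH)
model.to(config.device)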
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from pytorch_pretrained_bert.modeling import BertPreTrainedModel, BertModel

class BertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super(BertForSequenceClassification, self).__init__(config)
        self.num_labels = 240  # number of classes in this task
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.num_labels)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        # pooled_output is the [CLS] representation after the pooler layer
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
                                     output_all_encoded_layers=False)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            return loss
        else:
            return logits
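The post does not show how optimizer_grouped_parameters is built; it is usually the standard no-weight-decay grouping from the pytorch_pretrained_bert examples. A sketch (the 0.01 decay rate is an assumption):

param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
    # Apply weight decay to everything except biases and LayerNorm parameters
    {'params': [p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    {'params': [p for n, p in param_optimizer
                if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
]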
from pytorch_pretrained_bert.optimization import BertAdam

optimizer = BertAdam(optimizer_grouped_parameters, lr=config.learning_rate,
                     warmup=0.05, t_total=config.num_train_optimization_steps)
def test(self, model, test_loader):
    device = config.device
    model.eval()
    acc = 0
    for batch_idx, (x1, x2, x3, y) in enumerate(test_loader):
        x1, x2, x3, y = x1.to(device), x2.to(device), x3.to(device), y.to(device)
        with torch.no_grad():
            y_ = model(x1, x2, x3)
        _, pred = torch.max(y_, 1)
        acc += pred.eq(y.view_as(pred)).sum().item()  # remember to call .item()
    return acc / len(test_loader.dataset)
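Only the evaluation loop is shown in the post; here is a matching training-loop sketch (train_loader is assumed to yield the same 4-tuples, and forward() above returns the loss when labels are passed; the logging interval is an assumption):

def train(self, model, train_loader, optimizer):
    device = config.device
    model.train()
    for batch_idx, (x1, x2, x3, y) in enumerate(train_loader):
        x1, x2, x3, y = x1.to(device), x2.to(device), x3.to(device), y.to(device)
        optimizer.zero_grad()
        loss = model(x1, x2, x3, labels=y)  # loss is returned because labels are given
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('batch {}, loss {:.4f}'.format(batch_idx, loss.item()))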
pad_size = 256  # maximum text length
batch_size = 32
epoch = 6
learning_rate = 3e-5
The pretrained model was chosen by looking at how the models perform on the long-text classification task of the CLUE benchmark leaderboard (https://github.com/CLUEbenchmark/CLUE). The settings recommended there are pad_size=128, batch_size=24, lr=2e-5, but on this training set pad_size=256, batch_size=32, lr=3e-5 gave better results (luck cannot be ruled out).
On the data-processing side, I have seen others share NLP data-augmentation methods (synonym replacement, random insertion, random swap, random deletion). I did not get a chance to try them in this competition, but they are worth experimenting with; see the sketch below.
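A minimal sketch of two of those operations on a tokenized text, assuming augmentation is applied before encoding (all names here are illustrative; synonym replacement would additionally need a Chinese synonym resource such as the synonyms package):

import random

def random_swap(tokens, n=1):
    # Swap two random positions n times
    tokens = tokens[:]
    if len(tokens) < 2:
        return tokens
    for _ in range(n):
        i, j = random.sample(range(len(tokens)), 2)
        tokens[i], tokens[j] = tokens[j], tokens[i]
    return tokens

def random_deletion(tokens, p=0.1):
    # Drop each token with probability p, but never return an empty sequence
    kept = [t for t in tokens if random.random() > p]
    return kept if kept else [random.choice(tokens)]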