Model:
tuner007/t5_abs_qa
This is an abstractive question answering model, fine-tuned from T5-base using the text-to-text approach.
The model was trained for 2 epochs on a Colab TPU with 35 GB of memory.
```python
import torch
from transformers import AutoModelWithLMHead, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("tuner007/t5_abs_qa")
model = AutoModelWithLMHead.from_pretrained("tuner007/t5_abs_qa")

# Move the model to GPU if one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

def get_answer(question, context):
    # Build the input in the format the model was fine-tuned on
    input_text = "context: %s <question for context: %s </s>" % (context, question)
    features = tokenizer([input_text], return_tensors='pt')
    out = model.generate(input_ids=features['input_ids'].to(device),
                         attention_mask=features['attention_mask'].to(device))
    return tokenizer.decode(out[0])
```
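The decoded string returned by `get_answer` can still contain special tokens such as `<pad>` and `</s>`. A minimal variant that strips them and sets explicit generation parameters might look like the sketch below; the function name and the `max_length`/`num_beams` values are assumptions, not part of the original card.

```python
# Hypothetical variant of get_answer (an assumption, not from the original card):
# strips special tokens from the output and uses explicit generation settings.
def get_answer_clean(question, context, max_length=64, num_beams=4):
    input_text = "context: %s <question for context: %s </s>" % (context, question)
    features = tokenizer([input_text], return_tensors='pt').to(device)
    out = model.generate(
        input_ids=features['input_ids'],
        attention_mask=features['attention_mask'],
        max_length=max_length,   # cap on the generated answer length
        num_beams=num_beams,     # beam search for a slightly more stable answer
    )
    # skip_special_tokens drops <pad> and </s> from the decoded string
    return tokenizer.decode(out[0], skip_special_tokens=True)
```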
context = "In Norse mythology, Valhalla is a majestic, enormous hall located in Asgard, ruled over by the god Odin." question = "What is Valhalla?" get_answer(question, context) # output: 'It is a hall of worship ruled by Odin.'示例2:无答案
context = "In Norse mythology, Valhalla is a majestic, enormous hall located in Asgard, ruled over by the god Odin." question = "What is Asgard?" get_answer(question, context) # output: 'No answer available in context.'