Model:
cardiffnlp/twitter-roberta-base-2019-90m
This is a RoBERTa-base model trained on 90M tweets posted up until the end of 2019. More details and performance scores are available in the TimeLMs paper.
Below, we provide some usage examples using the standard Transformers interface. For another interface, better suited to comparing predictions and perplexity scores of models trained over different time intervals, check the TimeLMs repository.
To see other models trained up to different time periods, check this table.
用占位符 "@user" 和 "http" 替换用户名和链接。如果您想保留在训练过程中也保留的已验证用户,请保留列在 here 中的用户。
def preprocess(text):
    # Replace user mentions with '@user' and links with 'http'
    preprocessed_text = []
    for t in text.split():
        if len(t) > 1:
            t = '@user' if t[0] == '@' and t.count('@') == 1 else t
            t = 'http' if t.startswith('http') else t
        preprocessed_text.append(t)
    return ' '.join(preprocessed_text)
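As a quick sanity check, preprocess can be called directly on a raw tweet; the mention and link below are illustrative inputs, not examples from the model card:

print(preprocess("@TimeLMs check out https://github.com/cardiffnlp/timelms !"))
# '@user check out http !'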
from transformers import pipeline, AutoTokenizer

MODEL = "cardiffnlp/twitter-roberta-base-2019-90m"
fill_mask = pipeline("fill-mask", model=MODEL, tokenizer=MODEL)
tokenizer = AutoTokenizer.from_pretrained(MODEL)

def pprint(candidates, n):
    for i in range(n):
        token = tokenizer.decode(candidates[i]['token'])
        score = candidates[i]['score']
        print("%d) %.5f %s" % (i+1, score, token))

texts = [
    "So glad I'm <mask> vaccinated.",
    "I keep forgetting to bring a <mask>.",
    "Looking forward to watching <mask> Game tonight!",
]

for text in texts:
    t = preprocess(text)
    print(f"{'-'*30}\n{t}")
    candidates = fill_mask(t)
    pprint(candidates, 5)
Output:
------------------------------
So glad I'm <mask> vaccinated.
1) 0.28870 getting
2) 0.28611 not
3) 0.15485 fully
4) 0.07357 self
5) 0.01812 being
------------------------------
I keep forgetting to bring a <mask>.
1) 0.12194 book
2) 0.04396 pillow
3) 0.04202 bag
4) 0.03038 wallet
5) 0.02729 charger
------------------------------
Looking forward to watching <mask> Game tonight!
1) 0.65505 End
2) 0.19230 The
3) 0.03856 the
4) 0.01223 end
5) 0.00978 this
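By default the fill-mask pipeline returns the five highest-scoring candidates per mask. If you want a longer (or shorter) list, recent versions of transformers let you pass top_k at call time, for example:

candidates = fill_mask(t, top_k=10)  # request the 10 highest-scoring completions
pprint(candidates, 10)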
from transformers import AutoTokenizer, AutoModel, TFAutoModel
import numpy as np
from scipy.spatial.distance import cosine
from collections import Counter

def get_embedding(text):  # naive approach for demonstration
    text = preprocess(text)
    encoded_input = tokenizer(text, return_tensors='pt')
    features = model(**encoded_input)
    features = features[0].detach().cpu().numpy()
    return np.mean(features[0], axis=0)

MODEL = "cardiffnlp/twitter-roberta-base-2019-90m"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModel.from_pretrained(MODEL)

query = "The book was awesome"

tweets = ["I just ordered fried chicken ?",
          "The movie was great",
          "What time is the next game?",
          "Just finished reading 'Embeddings in NLP'"]

sims = Counter()
for tweet in tweets:
    sim = 1 - cosine(get_embedding(query), get_embedding(tweet))
    sims[tweet] = sim

print('Most similar to: ', query)
print(f"{'-'*30}")
for idx, (tweet, sim) in enumerate(sims.most_common()):
    print("%d) %.5f %s" % (idx+1, sim, tweet))
Output:
Most similar to:  The book was awesome
------------------------------
1) 0.99078 The movie was great
2) 0.96701 Just finished reading 'Embeddings in NLP'
3) 0.96037 I just ordered fried chicken ?
4) 0.95919 What time is the next game?
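Note that get_embedding above averages over every position, including the special <s> and </s> tokens; that is fine for single, unpadded sentences, but will skew the mean once you batch tweets of different lengths. Below is a minimal sketch of mask-aware mean pooling for batched inputs, assuming the tokenizer and model loaded above (the helper name mean_pooled_embeddings is ours, not part of the model card):

import torch

def mean_pooled_embeddings(texts):
    # Tokenize a batch with padding, then average hidden states over real tokens only.
    batch = tokenizer([preprocess(t) for t in texts], padding=True, return_tensors='pt')
    with torch.no_grad():
        hidden = model(**batch)[0]                         # (batch, seq_len, hidden_size)
    mask = batch['attention_mask'].unsqueeze(-1).float()   # (batch, seq_len, 1)
    summed = (hidden * mask).sum(dim=1)                    # zero out padded positions
    counts = mask.sum(dim=1).clamp(min=1e-9)               # number of real tokens per text
    return (summed / counts).numpy()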
from transformers import AutoTokenizer, AutoModel, TFAutoModel
import numpy as np

MODEL = "cardiffnlp/twitter-roberta-base-2019-90m"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
text = "Good night ?"
text = preprocess(text)

# Pytorch
model = AutoModel.from_pretrained(MODEL)
encoded_input = tokenizer(text, return_tensors='pt')
features = model(**encoded_input)
features = features[0].detach().cpu().numpy()
features_mean = np.mean(features[0], axis=0)
#features_max = np.max(features[0], axis=0)

# # Tensorflow
# model = TFAutoModel.from_pretrained(MODEL)
# encoded_input = tokenizer(text, return_tensors='tf')
# features = model(encoded_input)
# features = features[0].numpy()
# features_mean = np.mean(features[0], axis=0)
# #features_max = np.max(features[0], axis=0)
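For this base-sized model, features has shape (1, sequence_length, 768), so features_mean (and features_max) is a single 768-dimensional vector. If you prefer a single-token representation instead of pooling, a common alternative (not shown in the model card) is to take the hidden state of the leading <s> token:

features_cls = features[0][0]  # hidden state of the <s> token, shape (768,)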