Force BERT transformer to use CUDA

I want to force the Huggingface transformer (BERT) to make use of CUDA. nvidia-smi shows that all my CPU cores are maxed out while the code executes, but my GPU sits at 0% utilization. Unfortunately, I'm new to both the Huggingface library and PyTorch, and I don't know where to place the CUDA attributes device = cuda:0 or .to(cuda:0).

The code below is basically a customized part of the german sentiment BERT working example.
from typing import List

import torch as pt
from transformers import AutoModelForSequenceClassification, BertTokenizerFast

class SentimentModel_t(pt.nn.Module):
    def __init__(self, model_name: str = "oliverguhr/german-sentiment-bert"):
        DEVICE = "cuda:0" if pt.cuda.is_available() else "cpu"
        print(DEVICE)
        super(SentimentModel_t, self).__init__()

        self.model = AutoModelForSequenceClassification.from_pretrained(model_name).to(DEVICE)
        self.tokenizer = BertTokenizerFast.from_pretrained(model_name)

    def predict_sentiment(self, texts: List[str]) -> List[str]:
        texts = [self.clean_text(text) for text in texts]
        # add_special_tokens takes care of adding [CLS], [SEP], <s>... tokens in the right way for each model.
        input_ids = self.tokenizer.batch_encode_plus(texts, padding=True, add_special_tokens=True,
                                                     truncation=True, max_length=self.tokenizer.max_len_single_sentence)
        input_ids = pt.tensor(input_ids["input_ids"])

        with pt.no_grad():
            logits = self.model(input_ids)

        label_ids = pt.argmax(logits[0], axis=1)

        labels = [self.model.config.id2label[label_id] for label_id in label_ids.tolist()]
        return labels

EDIT: After applying the suggestions of @KonstantinosKokos (see the edited code above) I got:

RuntimeError: Input, output and indices must be on the current device

pointing to

        with pt.no_grad():
           logits = self.model(input_ids)

The full error trace is given below:

<ipython-input-15-b843edd87a1a> in predict_sentiment(self, texts)
     23 
     24         with pt.no_grad():
---> 25             logits = self.model(input_ids)
     26 
     27         label_ids = pt.argmax(logits[0], axis=1)

~/PycharmProjects/Test_project/venv/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

~/PycharmProjects/Test_project/venv/lib/python3.8/site-packages/transformers/models/bert/modeling_bert.py in forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, labels, output_attentions, output_hidden_states, return_dict)
   1364         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
   1365 
-> 1366         outputs = self.bert(
   1367             input_ids,
   1368             attention_mask=attention_mask,

~/PycharmProjects/Test_project/venv/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

~/PycharmProjects/Test_project/venv/lib/python3.8/site-packages/transformers/models/bert/modeling_bert.py in forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, encoder_hidden_states, encoder_attention_mask, output_attentions, output_hidden_states, return_dict)
    859         head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
    860 
--> 861         embedding_output = self.embeddings(
    862             input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
    863         )

~/PycharmProjects/Test_project/venv/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

~/PycharmProjects/Test_project/venv/lib/python3.8/site-packages/transformers/models/bert/modeling_bert.py in forward(self, input_ids, token_type_ids, position_ids, inputs_embeds)
    196 
    197         if inputs_embeds is None:
--> 198             inputs_embeds = self.word_embeddings(input_ids)
    199         token_type_embeddings = self.token_type_embeddings(token_type_ids)
    200 

~/PycharmProjects/Test_project/venv/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

~/PycharmProjects/Test_project/venv/lib/python3.8/site-packages/torch/nn/modules/sparse.py in forward(self, input)
    122 
    123     def forward(self, input: Tensor) -> Tensor:
--> 124         return F.embedding(
    125             input, self.weight, self.padding_idx, self.max_norm,
    126             self.norm_type, self.scale_grad_by_freq, self.sparse)

~/PycharmProjects/Test_project/venv/lib/python3.8/site-packages/torch/nn/functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
   1850         # remove once script supports set_grad_enabled
   1851         _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 1852     return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
   1853 
   1854 

You can make the whole class inherit from torch.nn.Module like so:

class SentimentModel_t(torch.nn.Module):
    def __init__(self, ...):
        super(SentimentModel_t, self).__init__()
        ...

After initializing your model, you can call .to(device) to cast it to the device of your choice, like so:

sentiment_model = SentimentModel_t(...)
sentiment_model.to('cuda')

.to() is applied recursively to all submodules of the class, model being one of them (Hugging Face models inherit from torch.nn.Module, and thus provide an implementation of to()). Note that this makes choosing a device inside __init__() redundant: the device is now an external context that you can easily switch to and from.
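This also explains the RuntimeError from the edit: "Input, output and indices must be on the current device" means the model weights were moved to the GPU while the input tensors stayed on the CPU, so the inputs have to follow the model. A minimal sketch of how predict_sentiment could handle this, assuming the wrapper class from the question (reading the device off a parameter is standard PyTorch and avoids hardcoding "cuda:0"):

def predict_sentiment(self, texts: List[str]) -> List[str]:
    texts = [self.clean_text(text) for text in texts]
    encoded = self.tokenizer.batch_encode_plus(texts, padding=True, add_special_tokens=True,
                                               truncation=True, max_length=self.tokenizer.max_len_single_sentence)

    # The inputs must live on the same device as the model weights;
    # reading the device off the first parameter avoids hardcoding it.
    device = next(self.model.parameters()).device
    input_ids = pt.tensor(encoded["input_ids"]).to(device)

    with pt.no_grad():
        logits = self.model(input_ids)

    label_ids = pt.argmax(logits[0], axis=1)
    return [self.model.config.id2label[label_id] for label_id in label_ids.tolist()]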


Alternatively, you can hardcode the device by casting the contained BERT model directly to cuda (less elegant):

class SentimentModel_t():
    def __init__(self, ...):
        DEVICE = "cuda:0" if pt.cuda.is_available() else "cpu"
        print(DEVICE)

        self.model = AutoModelForSequenceClassification.from_pretrained(model_name).to(DEVICE)
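Either way, it is easy to check where the weights actually ended up before running inference; inspecting the device of any parameter is a common PyTorch idiom (assuming the constructor from the question):

model = SentimentModel_t()
print(next(model.model.parameters()).device)  # prints cuda:0 when a GPU was picked up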

A little late to the party, but the Python package I wrote already uses your GPU. You can have a look at the code to see how it was implemented.

Just install the package:

pip install germansentiment

and run this code:

from germansentiment import SentimentModel

model = SentimentModel()

texts = [
    "Mit keinem guten Ergebniss","Das ist gar nicht mal so gut",
    "Total awesome!","nicht so schlecht wie erwartet",
    "Der Test verlief positiv.","Sie fährt ein grünes Auto."]

result = model.predict_sentiment(texts)
print(result)

Important: If you write your own code to use this model, you also need to run the preprocessing code; otherwise the results might be off.
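For illustration only, a hypothetical stand-in for that preprocessing (the real clean_text ships inside the germansentiment package; this sketch merely strips URLs and normalizes whitespace):

import re

def clean_text(text: str) -> str:
    # Hypothetical preprocessing stand-in, not the package's actual code:
    # remove URLs and collapse whitespace before tokenization.
    text = re.sub(r"https?://\S+", "", text)
    return re.sub(r"\s+", " ", text).strip()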