Update README.md
README.md CHANGED
@@ -21,7 +21,6 @@ When the parameter skip_special_tokens is True:
 ```python
 >>> from transformers import BertTokenizer, GPT2LMHeadModel, TextGenerationPipeline
->>> from transformers import TextGenerationPipeline,
 >>> tokenizer = BertTokenizer.from_pretrained("uer/gpt2-chinese-couplet")
 >>> model = GPT2LMHeadModel.from_pretrained("uer/gpt2-chinese-couplet")
 >>> text_generator = TextGenerationPipeline(model, tokenizer)
@@ -33,7 +32,6 @@ When the parameter skip_special_tokens is False:
 ```python
 >>> from transformers import BertTokenizer, GPT2LMHeadModel, TextGenerationPipeline
->>> from transformers import TextGenerationPipeline,
 >>> tokenizer = BertTokenizer.from_pretrained("uer/gpt2-chinese-poem")
 >>> model = GPT2LMHeadModel.from_pretrained("uer/gpt2-chinese-poem")
 >>> text_generator = TextGenerationPipeline(model, tokenizer)
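For reference, a minimal sketch (not part of the diff) of what the `skip_special_tokens` parameter controls once the pipeline's model has generated token ids. The prompt text, generation settings, and the use of `tokenizer.decode` here are illustrative assumptions rather than content from the README:

```python
from transformers import BertTokenizer, GPT2LMHeadModel

tokenizer = BertTokenizer.from_pretrained("uer/gpt2-chinese-couplet")
model = GPT2LMHeadModel.from_pretrained("uer/gpt2-chinese-couplet")

# Illustrative prompt; the "[CLS]" prefix and trailing "-" are assumptions
# about the couplet model's expected input format.
input_ids = tokenizer.encode(
    "[CLS]丹 枫 江 冷 人 初 去 -", add_special_tokens=False, return_tensors="pt"
)
output_ids = model.generate(input_ids, max_length=25, do_sample=True)

# skip_special_tokens=True drops special tokens such as [CLS] and [SEP]
# from the decoded text; skip_special_tokens=False keeps them, which is
# the difference the two README examples are meant to show.
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
print(tokenizer.decode(output_ids[0], skip_special_tokens=False))
```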