charlescxk committed
Commit ffcf344 · 1 Parent(s): e47d37d

upload ckpt

README.md CHANGED
@@ -1,3 +1,50 @@
  ---
  license: mit
+ license_name: deepseek
+ license_link: LICENSE
+ pipeline_tag: any-to-any
+ library_name: transformers
+ tags:
+ - multimodal
+ - text-to-image
+ - unified-model
  ---
+
+ ## 1. Introduction
+
+ Janus is a novel autoregressive framework that unifies multimodal understanding and generation.
+ It addresses the limitations of previous approaches by decoupling visual encoding into separate pathways, while still utilizing a single, unified transformer architecture for processing. The decoupling not only alleviates the conflict between the visual encoder’s roles in understanding and generation, but also enhances the framework’s flexibility.
+ Janus surpasses previous unified models and matches or exceeds the performance of task-specific models.
+ The simplicity, high flexibility, and effectiveness of Janus make it a strong candidate for next-generation unified multimodal models.
+
+ [**GitHub Repository**](https://github.com/deepseek-ai/Janus)
+
+ <div align="center">
+ <img alt="image" src="teaser.png" style="width:90%;">
+ </div>
+
+
+ ## 2. Model Summary
+
+ Janus is a unified understanding and generation MLLM that decouples visual encoding for multimodal understanding and generation.
+ Janus is built on DeepSeek-LLM-1.3b-base, which is trained on an approximate corpus of 500B text tokens.
+ For multimodal understanding, it uses [SigLIP-L](https://huggingface.co/timm/ViT-L-16-SigLIP-384) as the vision encoder, which supports 384 x 384 image input. For image generation, Janus uses the image tokenizer from [LlamaGen](https://github.com/FoundationVision/LlamaGen) with a downsample rate of 16.
+
+
+
+ ## 3. Quick Start
+
+ Please refer to the [**GitHub Repository**](https://github.com/deepseek-ai/Janus).
+
+
+ ## 4. License
+
+ This code repository is licensed under [the MIT License](https://github.com/deepseek-ai/DeepSeek-LLM/blob/HEAD/LICENSE-CODE). The use of Janus models is subject to the [DeepSeek Model License](https://github.com/deepseek-ai/DeepSeek-LLM/blob/HEAD/LICENSE-MODEL).
+ ## 5. Citation
+
+ ```
+ ```
+
+ ## 6. Contact
+
+ If you have any questions, please raise an issue or contact us at [[email protected]](mailto:[email protected]).
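
The Quick Start section above only points to the GitHub repository. As a minimal loading sketch (assuming the `janus` package from that repository is installed, and using a hypothetical repo id, since this commit does not state where the checkpoint is published):

```python
# Minimal loading sketch. Assumptions: the `janus` package from the linked GitHub repository
# is installed, and "deepseek-ai/Janus-1.3B" stands in for wherever these files are hosted.
import torch
from transformers import AutoModelForCausalLM
from janus.models import VLChatProcessor  # processor class named in processor_config.json

model_path = "deepseek-ai/Janus-1.3B"  # hypothetical repo id
processor = VLChatProcessor.from_pretrained(model_path)
tokenizer = processor.tokenizer

# config.json lists architectures=["MultiModalityCausalLM"], a custom class, so the janus
# package (or trust_remote_code) is needed for AutoModelForCausalLM to resolve it.
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
model = model.to(torch.bfloat16).eval()
```

See the repository's README for full multimodal understanding and text-to-image generation examples.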
config.json ADDED
@@ -0,0 +1,66 @@
+ {
+   "aligner_config": {
+     "cls": "MlpProjector",
+     "model_type": "aligner",
+     "params": {
+       "depth": 2,
+       "input_dim": 1024,
+       "n_embed": 2048,
+       "projector_type": "mlp_gelu"
+     }
+   },
+   "architectures": [
+     "MultiModalityCausalLM"
+   ],
+   "gen_aligner_config": {
+     "cls": "MlpProjector",
+     "model_type": "gen_aligner",
+     "params": {
+       "depth": 2,
+       "input_dim": 8,
+       "n_embed": 2048,
+       "projector_type": "mlp_gelu"
+     }
+   },
+   "gen_head_config": {
+     "cls": "vision_head",
+     "model_type": "gen_head",
+     "params": {
+       "image_token_embed": 2048,
+       "image_token_size": 16384,
+       "n_embed": 2048
+     }
+   },
+   "gen_vision_config": {
+     "cls": "VQ-16",
+     "model_type": "gen_vision",
+     "params": {
+       "image_token_size": 16384,
+       "n_embed": 8
+     }
+   },
+   "language_config": {
+     "hidden_size": 2048,
+     "intermediate_size": 5632,
+     "max_position_embeddings": 16384,
+     "model_type": "llama",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 24,
+     "num_key_value_heads": 16,
+     "torch_dtype": "bfloat16",
+     "vocab_size": 102400
+   },
+   "model_type": "multi_modality",
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.33.1",
+   "vision_config": {
+     "cls": "CLIPVisionTower",
+     "model_type": "vision",
+     "params": {
+       "image_size": 384,
+       "model_name": "siglip_large_patch16_384",
+       "select_feature": "same",
+       "select_layer": -1
+     }
+   }
+ }
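
This config encodes the decoupled design described in the README: `vision_config` (SigLIP-L at 384 px) and `aligner_config` form the understanding pathway, while `gen_vision_config` (a VQ tokenizer with a 16384-entry codebook of 8-dimensional codes), `gen_aligner_config`, and `gen_head_config` form the generation pathway, both attached to a 24-layer, 2048-hidden Llama-style LM. A small bookkeeping sketch of what these numbers imply (values are copied from the files in this commit; the per-image token count is derived, not stated here):

```python
# Bookkeeping sketch based on config.json / processor_config.json values in this commit.
image_size = 384                 # vision_config.params.image_size
downsample = 16                  # patch16 SigLIP encoder and the VQ-16 generation tokenizer
grid = image_size // downsample  # 24 tokens per side
tokens_per_image = grid * grid   # 576, matching num_image_tokens in processor_config.json
codebook_entries = 16384         # gen_vision_config.params.image_token_size
code_dim = 8                     # gen_vision_config.params.n_embed; gen_aligner projects 8 -> 2048
lm_hidden = 2048                 # language_config.hidden_size shared by both pathways
print(tokens_per_image, codebook_entries, code_dim, lm_hidden)
```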
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "background_color": [
+     127,
+     127,
+     127
+   ],
+   "do_normalize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "VLMImageProcessor",
+   "image_size": 384,
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "min_size": 14,
+   "processor_class": "VLChatProcessor",
+   "rescale_factor": 0.00392156862745098
+ }
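
A quick sketch of the pixel arithmetic these fields imply (illustrative only, not the `VLMImageProcessor` implementation): each 8-bit channel value is rescaled by 1/255 and then normalized with mean 0.5 and std 0.5, so inputs land roughly in [-1, 1].

```python
import numpy as np

# Pixel-range sketch using rescale_factor / image_mean / image_std from preprocessor_config.json.
pixels = np.array([0.0, 127.0, 255.0], dtype=np.float32)  # example 8-bit channel values
rescaled = pixels * 0.00392156862745098                    # rescale_factor = 1/255
normalized = (rescaled - 0.5) / 0.5                        # (x - mean) / std with mean = std = 0.5
print(normalized)                                          # approx [-1.0, -0.004, 1.0]
```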
processor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "add_special_token": false,
+   "ignore_id": -100,
+   "image_tag": "<image_placeholder>",
+   "mask_prompt": true,
+   "num_image_tokens": 576,
+   "processor_class": "VLChatProcessor",
+   "sft_format": "deepseek"
+ }
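
`num_image_tokens` (576) is consistent with the 384 x 384 input and downsample rate 16 from the README, and `ignore_id` (-100) is the standard PyTorch `ignore_index` used to drop masked prompt positions from the loss when `mask_prompt` is set. A generic illustration of that convention (not Janus-specific code):

```python
import torch
import torch.nn.functional as F

# Positions labeled with ignore_id (-100, as in processor_config.json) contribute nothing to the loss.
vocab_size = 102400                          # language_config.vocab_size from config.json
logits = torch.randn(4, vocab_size)
labels = torch.tensor([11, -100, 42, -100])  # masked prompt positions carry -100
loss = F.cross_entropy(logits, labels, ignore_index=-100)
print(loss.item())
```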
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea7cf164cbed272be2a9999bc4c314da6a6f23ef51871ddef3afc2c0c430cc3f
+ size 4178890389
special_tokens_map.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "additional_special_tokens": [
+     "<image_placeholder>",
+     "<patch_placeholder>",
+     "<|ref|>",
+     "<|/ref|>",
+     "<|det|>",
+     "<|/det|>",
+     "<|grounding|>",
+     "<|User|>",
+     "<|Assistant|>"
+   ],
+   "bos_token": "<|begin▁of▁sentence|>",
+   "eos_token": "<|end▁of▁sentence|>",
+   "pad_token": "<|▁pad▁|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "bos_token": "<|begin▁of▁sentence|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|end▁of▁sentence|>",
+   "model_max_length": 16384,
+   "pad_token": null,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": null,
+   "use_default_system_prompt": true
+ }
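
Note that `model_max_length` (16384) matches `max_position_embeddings` in config.json, and although `pad_token` is null here, special_tokens_map.json supplies `<|▁pad▁|>`. A small sketch of loading the tokenizer and checking these values (the repo id is an assumption):

```python
from transformers import AutoTokenizer

# Hypothetical repo id; tokenizer_config.json pins model_max_length to 16384.
tok = AutoTokenizer.from_pretrained("deepseek-ai/Janus-1.3B")
print(tok.bos_token, tok.eos_token, tok.model_max_length)
print(tok.additional_special_tokens)  # includes <image_placeholder>, <|User|>, <|Assistant|>, ...
```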