`SPLIT_EINSUM_V2` files
- coreml-prompthero-openjourney-v4-palettized_split_einsum_v2_compiled.zip +3 -0
- split_einsum_v2/compiled/SafetyChecker.mlmodelc/analytics/coremldata.bin +3 -0
- split_einsum_v2/compiled/SafetyChecker.mlmodelc/coremldata.bin +3 -0
- split_einsum_v2/compiled/SafetyChecker.mlmodelc/metadata.json +122 -0
- split_einsum_v2/compiled/SafetyChecker.mlmodelc/model.mil +0 -0
- split_einsum_v2/compiled/SafetyChecker.mlmodelc/weights/weight.bin +3 -0
- split_einsum_v2/compiled/TextEncoder.mlmodelc/analytics/coremldata.bin +3 -0
- split_einsum_v2/compiled/TextEncoder.mlmodelc/coremldata.bin +3 -0
- split_einsum_v2/compiled/TextEncoder.mlmodelc/metadata.json +83 -0
- split_einsum_v2/compiled/TextEncoder.mlmodelc/model.mil +0 -0
- split_einsum_v2/compiled/TextEncoder.mlmodelc/weights/weight.bin +3 -0
- split_einsum_v2/compiled/Unet.mlmodelc/analytics/coremldata.bin +3 -0
- split_einsum_v2/compiled/Unet.mlmodelc/coremldata.bin +3 -0
- split_einsum_v2/compiled/Unet.mlmodelc/metadata.json +103 -0
- split_einsum_v2/compiled/Unet.mlmodelc/model.mil +0 -0
- split_einsum_v2/compiled/Unet.mlmodelc/weights/weight.bin +3 -0
- split_einsum_v2/compiled/VAEDecoder.mlmodelc/analytics/coremldata.bin +3 -0
- split_einsum_v2/compiled/VAEDecoder.mlmodelc/coremldata.bin +3 -0
- split_einsum_v2/compiled/VAEDecoder.mlmodelc/metadata.json +75 -0
- split_einsum_v2/compiled/VAEDecoder.mlmodelc/model.mil +0 -0
- split_einsum_v2/compiled/VAEDecoder.mlmodelc/weights/weight.bin +3 -0
- split_einsum_v2/compiled/VAEEncoder.mlmodelc/analytics/coremldata.bin +3 -0
- split_einsum_v2/compiled/VAEEncoder.mlmodelc/coremldata.bin +3 -0
- split_einsum_v2/compiled/VAEEncoder.mlmodelc/metadata.json +75 -0
- split_einsum_v2/compiled/VAEEncoder.mlmodelc/model.mil +0 -0
- split_einsum_v2/compiled/VAEEncoder.mlmodelc/weights/weight.bin +3 -0
- split_einsum_v2/compiled/merges.txt +0 -0
- split_einsum_v2/compiled/vocab.json +0 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_safety_checker.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_safety_checker.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_safety_checker.mlpackage/Manifest.json +18 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_text_encoder.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_text_encoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_text_encoder.mlpackage/Manifest.json +18 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_unet.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_unet.mlpackage/Manifest.json +18 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_vae_decoder.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_vae_decoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_vae_decoder.mlpackage/Manifest.json +18 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_vae_encoder.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_vae_encoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
- split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_vae_encoder.mlpackage/Manifest.json +18 -0
coreml-prompthero-openjourney-v4-palettized_split_einsum_v2_compiled.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfca8f23f67679ae5992c362f7b1250669babc75f4779dbe1b31545257eaaa2d
+size 1565716830
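What was added here is a Git LFS pointer rather than the archive itself: `oid` is the SHA-256 digest of the real payload and `size` is its byte count. As a minimal sketch (not part of this repo), a downloaded copy could be checked against the pointer in Swift with CryptoKit; the local file path is a placeholder:

```swift
import Foundation
import CryptoKit

// Placeholder path to the downloaded archive referenced by the LFS pointer above.
let archiveURL = URL(fileURLWithPath: "coreml-prompthero-openjourney-v4-palettized_split_einsum_v2_compiled.zip")

// Values copied verbatim from the pointer file.
let expectedOID = "dfca8f23f67679ae5992c362f7b1250669babc75f4779dbe1b31545257eaaa2d"
let expectedSize = 1_565_716_830

do {
    // Loading ~1.5 GB into memory is fine for a one-off check; stream it for anything serious.
    let data = try Data(contentsOf: archiveURL)
    let digest = SHA256.hash(data: data).map { String(format: "%02x", $0) }.joined()
    print("size ok:", data.count == expectedSize)
    print("oid ok: ", digest == expectedOID)
} catch {
    print("could not read archive:", error)
}
```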
split_einsum_v2/compiled/SafetyChecker.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:84bf25ba4afe1a72fd423e8a30e0d71bbff4cf35e67c4d19a4fc7c5c0fecfb33
+size 207
split_einsum_v2/compiled/SafetyChecker.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bcb86f19c3d032754923c443636cb64e65a680b6ea5a3fde03f39ab3a8e7f995
+size 1405
split_einsum_v2/compiled/SafetyChecker.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,122 @@
+[
+  {
+    "shortDescription" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.",
+    "metadataOutputVersion" : "3.0",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32)",
+        "shortDescription" : "Identical to the input `images`. If safety checker detected any sensitive content, the corresponding image is replaced with a blank image (zeros)",
+        "shape" : "[]",
+        "name" : "filtered_images",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32)",
+        "shortDescription" : "Indicates whether the safety checker model found any sensitive content in the given image",
+        "shape" : "[]",
+        "name" : "has_nsfw_concepts",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32)",
+        "shortDescription" : "Concept scores are the scores before thresholding at zero yields the `has_nsfw_concepts` output. These scores can be used to tune the `adjustment` input",
+        "shape" : "[]",
+        "name" : "concept_scores",
+        "type" : "MultiArray"
+      }
+    ],
+    "version" : "prompthero\/openjourney-v4",
+    "modelParameters" : [
+
+    ],
+    "author" : "Please refer to the Model Card available at huggingface.co\/prompthero\/openjourney-v4",
+    "specificationVersion" : 7,
+    "storagePrecision" : "Float16",
+    "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)",
+    "mlProgramOperationTypeHistogram" : {
+      "Transpose" : 121,
+      "Ios16.scatterNd" : 1,
+      "Ios16.softmax" : 24,
+      "Ios16.linear" : 147,
+      "Ios16.add" : 51,
+      "Concat" : 1,
+      "Ios16.realDiv" : 1,
+      "Ios16.sigmoid" : 24,
+      "Ios16.reduceSum" : 3,
+      "Tile" : 4,
+      "Ios16.greater" : 4,
+      "Shape" : 1,
+      "ExpandDims" : 5,
+      "Ios16.cast" : 7,
+      "Ios16.clip" : 1,
+      "Ios16.conv" : 1,
+      "Ios16.abs" : 1,
+      "Ios16.layerNorm" : 50,
+      "Ios16.matmul" : 48,
+      "Ios16.pow" : 2,
+      "Ios16.reshape" : 193,
+      "SliceByIndex" : 2,
+      "Ios16.mul" : 73,
+      "NonZero" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 3 × 224 × 224)",
+        "shortDescription" : "The normalized image input tensor resized to (224x224) in channels-first (BCHW) format",
+        "shape" : "[1, 3, 224, 224]",
+        "name" : "clip_input",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 512 × 512 × 3)",
+        "shortDescription" : "Output of the vae_decoder (512x512) in channels-last (BHWC) format",
+        "shape" : "[1, 512, 512, 3]",
+        "name" : "images",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1)",
+        "shortDescription" : "Bias added to the concept scores to trade off increased recall for reduce precision in the safety checker classifier",
+        "shape" : "[1]",
+        "name" : "adjustment",
+        "type" : "MultiArray"
+      }
+    ],
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.version" : "7.0b1",
+      "com.github.apple.coremltools.source" : "torch==2.0.1"
+    },
+    "generatedClassName" : "Stable_Diffusion_version_prompthero_openjourney_v4_safety_checker",
+    "method" : "predict"
+  }
+]
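The `availability` and `modelType` fields above mean these compiled bundles are ML Programs that require iOS 16 / macOS 13 or newer. A minimal Swift sketch of loading one of the `.mlmodelc` directories with Core ML; the relative path is an assumption about where the unzipped files live, and split-einsum conversions are generally laid out with the Neural Engine in mind:

```swift
import CoreML

/// Load one of the compiled models shipped under split_einsum_v2/compiled/.
func loadCompiledModel(named name: String) throws -> MLModel {
    let config = MLModelConfiguration()
    config.computeUnits = .cpuAndNeuralEngine   // split-einsum layouts target the ANE
    let url = URL(fileURLWithPath: "split_einsum_v2/compiled/\(name).mlmodelc")
    return try MLModel(contentsOf: url, configuration: config)
}

// Example: let safetyChecker = try loadCompiledModel(named: "SafetyChecker")
```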
split_einsum_v2/compiled/SafetyChecker.mlmodelc/model.mil
ADDED
The diff for this file is too large to render.
See raw diff
split_einsum_v2/compiled/SafetyChecker.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31de10bc38b476d4fff1d1edd004e86474232f5fd2e3e5175ffbf92bf247cf61
+size 607990114
split_einsum_v2/compiled/TextEncoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abbe841416d2fc714533632b71ec3acd5d4c3353b47c74005f70726a7c0a6823
+size 207
split_einsum_v2/compiled/TextEncoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bbd7a9021b023b0aceda54839072d5092c3c4d4fd69db9f6ce885f1c20603e7
+size 815
split_einsum_v2/compiled/TextEncoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,83 @@
+[
+  {
+    "shortDescription" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.",
+    "metadataOutputVersion" : "3.0",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32)",
+        "shortDescription" : "The token embeddings as encoded by the Transformer model",
+        "shape" : "[]",
+        "name" : "last_hidden_state",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32)",
+        "shortDescription" : "The version of the `last_hidden_state` output after pooling",
+        "shape" : "[]",
+        "name" : "pooled_outputs",
+        "type" : "MultiArray"
+      }
+    ],
+    "version" : "prompthero\/openjourney-v4",
+    "modelParameters" : [
+
+    ],
+    "author" : "Please refer to the Model Card available at huggingface.co\/prompthero\/openjourney-v4",
+    "specificationVersion" : 7,
+    "storagePrecision" : "Mixed (Float16, Palettized (6 bits))",
+    "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)",
+    "mlProgramOperationTypeHistogram" : {
+      "Ios16.cast" : 3,
+      "Ios16.mul" : 36,
+      "Ios16.layerNorm" : 25,
+      "Ios16.constexprLutToDense" : 86,
+      "Stack" : 1,
+      "Transpose" : 60,
+      "Ios16.sigmoid" : 12,
+      "Ios16.linear" : 72,
+      "Ios16.add" : 37,
+      "Ios16.matmul" : 24,
+      "Ios16.softmax" : 12,
+      "Ios16.gatherNd" : 1,
+      "Ios16.gather" : 1,
+      "Ios16.reshape" : 120,
+      "Ios16.reduceArgmax" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32 1 × 77)",
+        "shortDescription" : "The token ids that represent the input text",
+        "shape" : "[1, 77]",
+        "name" : "input_ids",
+        "type" : "MultiArray"
+      }
+    ],
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.version" : "7.0b1",
+      "com.github.apple.coremltools.source" : "torch==2.0.1"
+    },
+    "generatedClassName" : "Stable_Diffusion_version_prompthero_openjourney_v4_text_encoder",
+    "method" : "predict"
+  }
+]
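Per the schema above, the text encoder takes a single `input_ids` tensor of shape [1, 77] (Float32) and returns `last_hidden_state` and `pooled_outputs`. A hedged sketch of calling it directly with Core ML; producing real token ids requires a CLIP tokenizer built on the bundled `vocab.json` / `merges.txt`, which is not shown here, and the zero padding below is only a placeholder:

```swift
import CoreML

/// Encode a prompt that has already been tokenized to at most 77 CLIP token ids.
func encodePrompt(tokenIDs: [Int], using textEncoder: MLModel) throws -> MLMultiArray? {
    // "input_ids" is declared as Float32 with shape [1, 77] in metadata.json.
    let inputIDs = try MLMultiArray(shape: [1, 77], dataType: .float32)
    for i in 0..<77 {
        // Placeholder padding; a real pipeline pads with the tokenizer's end-of-text id.
        inputIDs[i] = NSNumber(value: i < tokenIDs.count ? Float(tokenIDs[i]) : 0)
    }
    let provider = try MLDictionaryFeatureProvider(dictionary: [
        "input_ids": MLFeatureValue(multiArray: inputIDs)
    ])
    let outputs = try textEncoder.prediction(from: provider)
    // "last_hidden_state" conditions the UNet; "pooled_outputs" is also exposed.
    return outputs.featureValue(for: "last_hidden_state")?.multiArrayValue
}
```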
split_einsum_v2/compiled/TextEncoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render.
See raw diff
split_einsum_v2/compiled/TextEncoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0048df441ec6a16b4341701dcae5d8a1b4bbb6ae455243c1e32d2d88e5b91b7
+size 139866304
split_einsum_v2/compiled/Unet.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a49696b55778e9ed7599d8f331ba96ce29b991a7be6174464a68bc37fdc74346
+size 207
split_einsum_v2/compiled/Unet.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fec7dc5d71ebf264b7cd77fc2f2498668a02941838ed4c46205c3b5b72a61e0a
+size 1197
split_einsum_v2/compiled/Unet.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,103 @@
+[
+  {
+    "shortDescription" : "Stable Diffusion generates images conditioned on text or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.",
+    "metadataOutputVersion" : "3.0",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32)",
+        "shortDescription" : "Same shape and dtype as the `sample` input. The predicted noise to facilitate the reverse diffusion (denoising) process",
+        "shape" : "[]",
+        "name" : "noise_pred",
+        "type" : "MultiArray"
+      }
+    ],
+    "version" : "prompthero\/openjourney-v4",
+    "modelParameters" : [
+
+    ],
+    "author" : "Please refer to the Model Card available at huggingface.co\/prompthero\/openjourney-v4",
+    "specificationVersion" : 7,
+    "storagePrecision" : "Mixed (Float16, Palettized (6 bits))",
+    "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)",
+    "mlProgramOperationTypeHistogram" : {
+      "Transpose" : 32,
+      "UpsampleNearestNeighbor" : 3,
+      "Ios16.reduceMean" : 218,
+      "Ios16.sin" : 1,
+      "Ios16.softmax" : 896,
+      "Split" : 16,
+      "Ios16.add" : 265,
+      "Concat" : 206,
+      "Ios16.realDiv" : 61,
+      "Ios16.square" : 61,
+      "ExpandDims" : 3,
+      "Ios16.sub" : 109,
+      "Ios16.cast" : 1,
+      "Ios16.conv" : 282,
+      "Ios16.constexprLutToDense" : 310,
+      "Ios16.einsum" : 1792,
+      "Ios16.gelu" : 16,
+      "Ios16.batchNorm" : 61,
+      "Ios16.reshape" : 154,
+      "Ios16.rsqrt" : 48,
+      "Ios16.silu" : 47,
+      "Ios16.sqrt" : 61,
+      "Ios16.mul" : 1057,
+      "Ios16.cos" : 1,
+      "SliceByIndex" : 1570
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 2 × 4 × 64 × 64)",
+        "shortDescription" : "The low resolution latent feature maps being denoised through reverse diffusion",
+        "shape" : "[2, 4, 64, 64]",
+        "name" : "sample",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 2)",
+        "shortDescription" : "A value emitted by the associated scheduler object to condition the model on a given noise schedule",
+        "shape" : "[2]",
+        "name" : "timestep",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 2 × 768 × 1 × 77)",
+        "shortDescription" : "Output embeddings from the associated text_encoder model to condition to generated image on text. A maximum of 77 tokens (~40 words) are allowed. Longer text is truncated. Shorter text does not reduce computation.",
+        "shape" : "[2, 768, 1, 77]",
+        "name" : "encoder_hidden_states",
+        "type" : "MultiArray"
+      }
+    ],
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.version" : "7.0b1",
+      "com.github.apple.coremltools.source" : "torch==2.0.1"
+    },
+    "generatedClassName" : "Stable_Diffusion_version_prompthero_openjourney_v4_unet",
+    "method" : "predict"
+  }
+]
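The input schema above fixes the UNet's batched shapes: `sample` [2, 4, 64, 64], `timestep` [2], and `encoder_hidden_states` [2, 768, 1, 77], all Float16 (the batch of 2 typically carries the unconditional and text-conditioned halves of classifier-free guidance). A minimal sketch of one denoising call; filling the arrays with real scheduler and text-encoder values is out of scope here and the function name is illustrative:

```swift
import CoreML

/// One reverse-diffusion step: predict the noise residual for the current latents.
func predictNoise(unet: MLModel,
                  sample: MLMultiArray,                // [2, 4, 64, 64], Float16
                  timestep: MLMultiArray,              // [2], Float16
                  encoderHiddenStates: MLMultiArray    // [2, 768, 1, 77], Float16
) throws -> MLMultiArray? {
    let inputs = try MLDictionaryFeatureProvider(dictionary: [
        "sample": MLFeatureValue(multiArray: sample),
        "timestep": MLFeatureValue(multiArray: timestep),
        "encoder_hidden_states": MLFeatureValue(multiArray: encoderHiddenStates),
    ])
    let outputs = try unet.prediction(from: inputs)
    // "noise_pred" has the same shape and dtype as `sample`, per the output schema above.
    return outputs.featureValue(for: "noise_pred")?.multiArrayValue
}
```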
split_einsum_v2/compiled/Unet.mlmodelc/model.mil
ADDED
The diff for this file is too large to render.
See raw diff
split_einsum_v2/compiled/Unet.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1b888d218ede10d529062ef12a7745660868d720dae9cc2859d1acb668faf6e
+size 645167616
split_einsum_v2/compiled/VAEDecoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a6a82d8d8b0c7be51ada6554a7779d4aeafafd98972d3e70a899d5b22483cd6
+size 207
split_einsum_v2/compiled/VAEDecoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e4c7bcfdfd4f3998885121a8195b25908d44edb63d17fe688f69918abf9dd62
+size 745
split_einsum_v2/compiled/VAEDecoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,75 @@
+[
+  {
+    "shortDescription" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.",
+    "metadataOutputVersion" : "3.0",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32)",
+        "shortDescription" : "Generated image normalized to range [-1, 1]",
+        "shape" : "[]",
+        "name" : "image",
+        "type" : "MultiArray"
+      }
+    ],
+    "version" : "prompthero\/openjourney-v4",
+    "modelParameters" : [
+
+    ],
+    "author" : "Please refer to the Model Card available at huggingface.co\/prompthero\/openjourney-v4",
+    "specificationVersion" : 7,
+    "storagePrecision" : "Float16",
+    "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)",
+    "mlProgramOperationTypeHistogram" : {
+      "Ios16.cast" : 1,
+      "Ios16.mul" : 1,
+      "Ios16.sqrt" : 30,
+      "Ios16.sub" : 30,
+      "Transpose" : 7,
+      "UpsampleNearestNeighbor" : 3,
+      "Ios16.conv" : 36,
+      "Ios16.add" : 45,
+      "Ios16.linear" : 4,
+      "Ios16.matmul" : 2,
+      "Ios16.realDiv" : 30,
+      "Ios16.reduceMean" : 60,
+      "Ios16.softmax" : 1,
+      "Ios16.batchNorm" : 30,
+      "Ios16.square" : 30,
+      "Ios16.reshape" : 70,
+      "Ios16.silu" : 29
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 4 × 64 × 64)",
+        "shortDescription" : "The denoised latent embeddings from the unet model after the last step of reverse diffusion",
+        "shape" : "[1, 4, 64, 64]",
+        "name" : "z",
+        "type" : "MultiArray"
+      }
+    ],
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.version" : "7.0b1",
+      "com.github.apple.coremltools.source" : "torch==2.0.1"
+    },
+    "generatedClassName" : "Stable_Diffusion_version_prompthero_openjourney_v4_vae_decoder",
+    "method" : "predict"
+  }
+]
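The decoder's `image` output is documented above as normalized to [-1, 1], so turning it into displayable 8-bit pixels is a simple affine map; a short sketch of that conversion (the helper name is illustrative):

```swift
/// Map one decoder output value from [-1, 1] to an 8-bit channel value in [0, 255].
@inline(__always)
func toPixel(_ value: Float) -> UInt8 {
    let unit = (value + 1.0) / 2.0                        // [-1, 1] -> [0, 1]
    return UInt8((min(max(unit, 0.0), 1.0) * 255.0).rounded())
}
```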
split_einsum_v2/compiled/VAEDecoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render.
See raw diff
split_einsum_v2/compiled/VAEDecoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27ecb93f85419b51c9c17bed32d40c604ec814d197f6544b42d81c38ed222cd3
+size 98993280
split_einsum_v2/compiled/VAEEncoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46bf9ed4189cb7ef51d2df2c5b508eed67da2826fdf8a9b8cc749e1e6ac63267
+size 207
split_einsum_v2/compiled/VAEEncoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bac294963b14266b301ed93df978c3bacb199ace104f00e7eadedca06a6f0ddc
+size 751
split_einsum_v2/compiled/VAEEncoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,75 @@
+[
+  {
+    "shortDescription" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.",
+    "metadataOutputVersion" : "3.0",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32)",
+        "shortDescription" : "The latent embeddings from the unet model from the input image.",
+        "shape" : "[]",
+        "name" : "latent",
+        "type" : "MultiArray"
+      }
+    ],
+    "version" : "prompthero\/openjourney-v4",
+    "modelParameters" : [
+
+    ],
+    "author" : "Please refer to the Model Card available at huggingface.co\/prompthero\/openjourney-v4",
+    "specificationVersion" : 7,
+    "storagePrecision" : "Float16",
+    "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)",
+    "mlProgramOperationTypeHistogram" : {
+      "Pad" : 3,
+      "Ios16.cast" : 1,
+      "Ios16.mul" : 1,
+      "Ios16.sqrt" : 22,
+      "Ios16.sub" : 22,
+      "Transpose" : 7,
+      "Ios16.conv" : 28,
+      "Ios16.add" : 33,
+      "Ios16.linear" : 4,
+      "Ios16.matmul" : 2,
+      "Ios16.realDiv" : 22,
+      "Ios16.reduceMean" : 44,
+      "Ios16.softmax" : 1,
+      "Ios16.batchNorm" : 22,
+      "Ios16.square" : 22,
+      "Ios16.reshape" : 54,
+      "Ios16.silu" : 21
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 3 × 512 × 512)",
+        "shortDescription" : "The input image to base the initial latents on normalized to range [-1, 1]",
+        "shape" : "[1, 3, 512, 512]",
+        "name" : "z",
+        "type" : "MultiArray"
+      }
+    ],
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.version" : "7.0b1",
+      "com.github.apple.coremltools.source" : "torch==2.0.1"
+    },
+    "generatedClassName" : "Stable_Diffusion_version_prompthero_openjourney_v4_vae_encoder",
+    "method" : "predict"
+  }
+]
split_einsum_v2/compiled/VAEEncoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render.
See raw diff
split_einsum_v2/compiled/VAEEncoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b24a2cab94a6700c3bf26b670c2b10340aba3cff8b28c9efc8dead6f925a24fa
+size 68338112
split_einsum_v2/compiled/merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
split_einsum_v2/compiled/vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_safety_checker.mlpackage/Data/com.apple.CoreML/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d1122edeea517cdfbe6b574b30eaefa6c5fb33bc3065be9b7c5279618f322e4
+size 319155
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_safety_checker.mlpackage/Data/com.apple.CoreML/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31de10bc38b476d4fff1d1edd004e86474232f5fd2e3e5175ffbf92bf247cf61
+size 607990114
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_safety_checker.mlpackage/Manifest.json
ADDED
@@ -0,0 +1,18 @@
+{
+    "fileFormatVersion": "1.0.0",
+    "itemInfoEntries": {
+        "1357805A-6142-4432-A386-ECA6B8773F28": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Specification",
+            "name": "model.mlmodel",
+            "path": "com.apple.CoreML/model.mlmodel"
+        },
+        "B3809348-0AA0-4CB1-BD84-9DF798183D64": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Weights",
+            "name": "weights",
+            "path": "com.apple.CoreML/weights"
+        }
+    },
+    "rootModelIdentifier": "1357805A-6142-4432-A386-ECA6B8773F28"
+}
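The `packages/` tree holds the uncompiled `.mlpackage` sources (model spec plus weights, as listed in this manifest), while `compiled/` already contains their `.mlmodelc` equivalents. A hedged Swift sketch of compiling a package on device when only the source form is available; the path in the usage comment is a placeholder:

```swift
import CoreML

/// Compile an .mlpackage into a temporary .mlmodelc and load it.
func loadPackage(at path: String) async throws -> MLModel {
    let packageURL = URL(fileURLWithPath: path)
    let compiledURL = try await MLModel.compileModel(at: packageURL)   // async API, iOS 16 / macOS 13
    return try MLModel(contentsOf: compiledURL)
}

// Example (placeholder path):
// let safetyChecker = try await loadPackage(
//     at: "split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_safety_checker.mlpackage")
```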
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_text_encoder.mlpackage/Data/com.apple.CoreML/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c1cf46cce2e347935d76c503c082512602f7b86e2bb3957061e5ab8b571979d
+size 174133
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_text_encoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0048df441ec6a16b4341701dcae5d8a1b4bbb6ae455243c1e32d2d88e5b91b7
+size 139866304
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_text_encoder.mlpackage/Manifest.json
ADDED
@@ -0,0 +1,18 @@
+{
+    "fileFormatVersion": "1.0.0",
+    "itemInfoEntries": {
+        "C0B96BD0-F5A9-452F-B6D0-99EC139D826E": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Weights",
+            "name": "weights",
+            "path": "com.apple.CoreML/weights"
+        },
+        "F93CE9EF-2190-487D-B52B-064A305A3730": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Specification",
+            "name": "model.mlmodel",
+            "path": "com.apple.CoreML/model.mlmodel"
+        }
+    },
+    "rootModelIdentifier": "F93CE9EF-2190-487D-B52B-064A305A3730"
+}
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_unet.mlpackage/Data/com.apple.CoreML/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fe39a0c8d948d477a0c7b6426fbd5cfd81208a7389ac317c53cf3d28f5fc9da
+size 2641708
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1b888d218ede10d529062ef12a7745660868d720dae9cc2859d1acb668faf6e
+size 645167616
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_unet.mlpackage/Manifest.json
ADDED
@@ -0,0 +1,18 @@
+{
+    "fileFormatVersion": "1.0.0",
+    "itemInfoEntries": {
+        "8A4847DE-B063-4580-8628-E18143132A51": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Specification",
+            "name": "model.mlmodel",
+            "path": "com.apple.CoreML/model.mlmodel"
+        },
+        "F77F90E2-63D9-4DE5-88B7-52905B21A932": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Weights",
+            "name": "weights",
+            "path": "com.apple.CoreML/weights"
+        }
+    },
+    "rootModelIdentifier": "8A4847DE-B063-4580-8628-E18143132A51"
+}
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_vae_decoder.mlpackage/Data/com.apple.CoreML/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d4c86d8b5710e7e4a7153ecd292e2742ff85c99a8008db126c7e3d8ba1f0b70
+size 156802
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_vae_decoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27ecb93f85419b51c9c17bed32d40c604ec814d197f6544b42d81c38ed222cd3
+size 98993280
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_vae_decoder.mlpackage/Manifest.json
ADDED
@@ -0,0 +1,18 @@
+{
+    "fileFormatVersion": "1.0.0",
+    "itemInfoEntries": {
+        "4530907E-4811-4B9D-899A-EAE781C01A9D": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Weights",
+            "name": "weights",
+            "path": "com.apple.CoreML/weights"
+        },
+        "4F61D630-3ABE-4616-889F-96B841170F08": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Specification",
+            "name": "model.mlmodel",
+            "path": "com.apple.CoreML/model.mlmodel"
+        }
+    },
+    "rootModelIdentifier": "4F61D630-3ABE-4616-889F-96B841170F08"
+}
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_vae_encoder.mlpackage/Data/com.apple.CoreML/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af9d8c6ffca95ca2f8d0745ba4a448130a327b54ff22ef73f7cce5d766eeb89a
+size 120671
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_vae_encoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b24a2cab94a6700c3bf26b670c2b10340aba3cff8b28c9efc8dead6f925a24fa
+size 68338112
split_einsum_v2/packages/Stable_Diffusion_version_prompthero_openjourney-v4_vae_encoder.mlpackage/Manifest.json
ADDED
@@ -0,0 +1,18 @@
+{
+    "fileFormatVersion": "1.0.0",
+    "itemInfoEntries": {
+        "F3A948D1-F16C-46C2-87C4-9F89435CB9A6": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Specification",
+            "name": "model.mlmodel",
+            "path": "com.apple.CoreML/model.mlmodel"
+        },
+        "FE630411-11F6-454A-88E1-24B4B5EA2E22": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Weights",
+            "name": "weights",
+            "path": "com.apple.CoreML/weights"
+        }
+    },
+    "rootModelIdentifier": "F3A948D1-F16C-46C2-87C4-9F89435CB9A6"
+}