Add kernel size slider
chatbot_constructor.py  +3 -2  (CHANGED)
@@ -15,7 +15,7 @@ os.mkdir("cache")
 def hash_str(data: str):
     return hashlib.md5(data.encode('utf-8')).hexdigest()
 
-def train(message: str = "", epochs: int = 16, learning_rate: float = 0.001, emb_size: int = 128, inp_len: int = 16, kernels_count: int = 8, data: str = ""):
+def train(message: str = "", epochs: int = 16, learning_rate: float = 0.001, emb_size: int = 128, inp_len: int = 16, kernels_count: int = 8, kernel_size: int = 8, data: str = ""):
     data_hash = None
     if "→" not in data or "\n" not in data:
         if data in os.listdir("cache"):
@@ -39,7 +39,7 @@ def train(message: str = "", epochs: int = 16, learning_rate: float = 0.001, emb
     emb_layer = Embedding(input_dim=vocab_size, output_dim=emb_size, input_length=inp_len)(input_layer)
     attn_layer = MultiHeadAttention(num_heads=4, key_dim=128)(emb_layer, emb_layer, emb_layer)
     noise_layer = GaussianNoise(0.1)(attn_layer)
-    conv1_layer = Conv1D(kernels_count, …
+    conv1_layer = Conv1D(kernels_count, kernel_size, padding='same', activation='relu', strides=1, input_shape=(64, 128))(noise_layer)
     conv2_layer = Conv1D(16, 4, padding='same', activation='relu', strides=1)(conv1_layer)
     conv3_layer = Conv1D(8, 2, padding='same', activation='relu', strides=1)(conv2_layer)
     flatten_layer = Flatten()(conv3_layer)
@@ -85,6 +85,7 @@ if __name__ == "__main__":
         gr.inputs.Slider(1, 256, default=100, step=1, label="Embedding size"),
         gr.inputs.Slider(1, 128, default=16, step=1, label="Input Length"),
         gr.inputs.Slider(1, 128, default=64, step=1, label="Convolution kernel count"),
+        gr.inputs.Slider(1, 16, default=8, step=1, label="Convolution kernel size"),
         "text"],
     outputs="text")
     iface.launch()
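For context, here is a minimal sketch of the layer stack these hunks touch, with the new `kernel_size` argument parameterizing the first `Conv1D`. The `build_model` wrapper, the `Input` layer, the returned `Model`, and `vocab_size` are placeholders for parts of chatbot_constructor.py that are not visible in the diff; only the Embedding → MultiHeadAttention → GaussianNoise → Conv1D stack mirrors the lines shown above.

```python
# Sketch only: the layer stack from the diff, with kernel_size as a parameter.
# build_model, Input, Model and vocab_size stand in for code outside the hunks.
from tensorflow.keras.layers import (Input, Embedding, MultiHeadAttention,
                                     GaussianNoise, Conv1D, Flatten)
from tensorflow.keras.models import Model

def build_model(vocab_size: int, inp_len: int = 16, emb_size: int = 128,
                kernels_count: int = 8, kernel_size: int = 8) -> Model:
    input_layer = Input(shape=(inp_len,))
    # input_length from the original Embedding call is omitted here: it is
    # redundant in the functional API and rejected by newer Keras releases.
    emb_layer = Embedding(input_dim=vocab_size, output_dim=emb_size)(input_layer)
    attn_layer = MultiHeadAttention(num_heads=4, key_dim=128)(emb_layer, emb_layer, emb_layer)
    noise_layer = GaussianNoise(0.1)(attn_layer)
    # The two slider-controlled values: number of filters and their width.
    # (input_shape=(64, 128) from the diff is dropped; it has no effect when
    # the layer is applied to an existing tensor.)
    conv1_layer = Conv1D(kernels_count, kernel_size, padding='same',
                         activation='relu', strides=1)(noise_layer)
    conv2_layer = Conv1D(16, 4, padding='same', activation='relu', strides=1)(conv1_layer)
    conv3_layer = Conv1D(8, 2, padding='same', activation='relu', strides=1)(conv2_layer)
    flatten_layer = Flatten()(conv3_layer)
    return Model(input_layer, flatten_layer)
```

With the defaults in the new signature, `Conv1D(kernels_count, kernel_size, ...)` becomes `Conv1D(8, 8, ...)`; the UI sliders simply override those two numbers at train time.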
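Gradio's `Interface` matches input widgets to function parameters by position, which is why the new "Convolution kernel size" slider is inserted between the kernel-count slider and the trailing "text" input, mirroring where `kernel_size` sits in `train()`'s signature. The toy example below illustrates that mapping; it uses the current Gradio API (`gr.Slider` with `value=`), whereas the script itself uses the legacy `gr.inputs.Slider(default=...)` calls, which were deprecated in Gradio 3 and removed in Gradio 4.

```python
# Toy, self-contained illustration of positional input-to-parameter mapping in
# gr.Interface; demo() stands in for train(), and only the two kernel-related
# sliders from the real inputs list are shown.
import gradio as gr

def demo(kernels_count: int, kernel_size: int) -> str:
    # In chatbot_constructor.py these two values end up in
    # Conv1D(kernels_count, kernel_size, padding='same', activation='relu').
    return f"First conv layer: Conv1D({int(kernels_count)}, {int(kernel_size)})"

iface = gr.Interface(
    fn=demo,
    inputs=[
        gr.Slider(1, 128, value=64, step=1, label="Convolution kernel count"),
        gr.Slider(1, 16, value=8, step=1, label="Convolution kernel size"),
    ],
    outputs="text",
)

if __name__ == "__main__":
    iface.launch()
```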