`;
+ return $(hiddenBlock);
+}
+
/**
 * Builds the list-entry DOM element for a single character.
 * @param {object} item - The character object (name, avatar, data, fav, ...)
 * @param {string|number} id - The character's id/index
 * @returns {JQuery} The populated character block element
 */
function getCharacterBlock(item, id) {
    // Resolve the avatar image, falling back to the default when none is set.
    const avatarUrl = item.avatar != 'none'
        ? getThumbnailUrl('avatar', item.avatar)
        : default_avatar;

    // Clone the character template and fill in the display fields.
    const template = $('#character_template .character_select').clone();
    template.attr({ 'chid': id, 'id': `CharID${id}` });
    template.find('img').attr('src', avatarUrl).attr('alt', item.name);
    template.find('.avatar').attr('title', `[Character] ${item.name}\nFile: ${item.avatar}`);
    template.find('.ch_name').text(item.name).attr('title', `[Character] ${item.name}`);
    if (power_user.show_card_avatar_urls) {
        template.find('.ch_avatar_url').text(item.avatar);
    }
    template.find('.ch_fav_icon').css('display', 'none');
    template.toggleClass('is_fav', item.fav || item.fav == 'true');
    template.find('.ch_fav').val(item.fav);

    // Creator notes double as the card description; hide the element when absent.
    const description = item.data?.creator_notes || '';
    if (description) {
        template.find('.ch_description').text(description);
    } else {
        template.find('.ch_description').hide();
    }

    // Show the configured auxiliary field (defaults to the character version).
    const auxFieldName = power_user.aux_field || 'character_version';
    const auxFieldValue = (item.data && item.data[auxFieldName]) || '';
    if (auxFieldValue) {
        template.find('.character_version').text(auxFieldValue);
    } else {
        template.find('.character_version').hide();
    }

    // Render inline tags for this entity.
    printTagList(template.find('.tags'), { forEntityOrKey: id });

    return template;
}
+
/**
 * Prints the global character list, optionally doing a full refresh of the list
 * Use this function whenever the reprinting of the character list is the primary focus, otherwise using `printCharactersDebounced` is preferred for a cleaner, non-blocking experience.
 *
 * The printing will also always reprint all filter options of the global list, to keep them up to date.
 *
 * @param {boolean} fullRefresh - If true, the list is fully refreshed and the navigation is being reset
 */
export async function printCharacters(fullRefresh = false) {
    // localStorage key remembering the user's preferred page size
    const storageKey = 'Characters_PerPage';
    const listId = '#rm_print_characters_block';

    // Remember the scroll position so re-rendering does not jump the view
    let currentScrollTop = $(listId).scrollTop();

    if (fullRefresh) {
        saveCharactersPage = 0;
        currentScrollTop = 0;
        // Yield to the event loop so pending DOM/state updates settle before reprinting
        await delay(1);
    }

    // Before printing the personas, we check if we should enable/disable search sorting
    verifyCharactersSearchSortRule();

    // We are actually always reprinting filters, as it "doesn't hurt", and this way they are always up to date
    printTagFilters(tag_filter_type.character);
    printTagFilters(tag_filter_type.group_member);

    // We are also always reprinting the lists on character/group edit window, as these ones doesn't get updated otherwise
    applyTagsOnCharacterSelect();
    applyTagsOnGroupSelect();

    const entities = getEntitiesList({ doFilter: true });

    $('#rm_print_characters_pagination').pagination({
        dataSource: entities,
        pageSize: Number(localStorage.getItem(storageKey)) || per_page_default,
        sizeChangerOptions: [10, 25, 50, 100, 250, 500, 1000],
        pageRange: 1,
        pageNumber: saveCharactersPage || 1,
        position: 'top',
        showPageNumbers: false,
        showSizeChanger: true,
        prevText: '<',
        nextText: '>',
        formatNavigator: PAGINATION_TEMPLATE,
        showNavigator: true,
        // Renders one page of entities into the list container
        callback: function (/** @type {Entity[]} */ data) {
            $(listId).empty();
            // When inside a bogus folder, show a "back" entry first
            if (power_user.bogus_folders && isBogusFolderOpen()) {
                $(listId).append(getBackBlock());
            }
            if (!data.length) {
                $(listId).append(getEmptyBlock());
            }
            // Tags are folders, not characters/groups, so they don't count toward displayCount
            let displayCount = 0;
            for (const i of data) {
                switch (i.type) {
                    case 'character':
                        $(listId).append(getCharacterBlock(i.item, i.id));
                        displayCount++;
                        break;
                    case 'group':
                        $(listId).append(getGroupBlock(i.item));
                        displayCount++;
                        break;
                    case 'tag':
                        $(listId).append(getTagBlock(i.item, i.entities, i.hidden, i.isUseless));
                        break;
                }
            }

            // Show how many entries were filtered out, if any filter is active
            const hidden = (characters.length + groups.length) - displayCount;
            if (hidden > 0 && entitiesFilter.hasAnyFilter()) {
                $(listId).append(getHiddenBlock(hidden));
            }

            eventSource.emit(event_types.CHARACTER_PAGE_LOADED);
        },
        // Persist the selected page size across sessions
        afterSizeSelectorChange: function (e) {
            localStorage.setItem(storageKey, e.target.value);
        },
        // Remember the current page so a reprint stays on it
        afterPaging: function (e) {
            saveCharactersPage = e;
        },
        // Restore the pre-render scroll position
        afterRender: function () {
            $(listId).scrollTop(currentScrollTop);
        },
    });

    favsToHotswap();
}
+
/**
 * Checks the state of the current search, and adds/removes the search sorting
 * option in the sort dropdown accordingly.
 */
function verifyCharactersSearchSortRule() {
    const searchTerm = entitiesFilter.getFilterData(FILTER_TYPES.SEARCH);
    const selector = $('#character_sort_order');
    const searchOption = selector.find('option[data-field="search"]');
    const optionIsHidden = searchOption.attr('hidden') !== undefined;

    if (searchTerm && optionIsHidden) {
        // A search is active: reveal the search-relevance sort option and select it
        searchOption.removeAttr('hidden');
        searchOption.prop('selected', true);
        flashHighlight(selector);
    } else if (!searchTerm && !optionIsHidden) {
        // Search got cleared: hide the option and restore the configured sort
        searchOption.attr('hidden', '');
        selector.find(`option[data-order="${power_user.sort_order}"][data-field="${power_user.sort_field}"]`).prop('selected', true);
    }
}
+
+/** @typedef {object} Character - A character */
+/** @typedef {object} Group - A group */
+
+/**
+ * @typedef {object} Entity - Object representing a display entity
+ * @property {Character|Group|import('./scripts/tags.js').Tag|*} item - The item
+ * @property {string|number} id - The id
+ * @property {'character'|'group'|'tag'} type - The type of this entity (character, group, tag)
+ * @property {Entity[]?} [entities=null] - An optional list of entities relevant for this item
+ * @property {number?} [hidden=null] - An optional number representing how many hidden entities this entity contains
+ * @property {boolean?} [isUseless=null] - Specifies if the entity is useless (not relevant, but should still be displayed for consistency) and should be displayed greyed out
+ */
+
/**
 * Wraps a character in its display-entity representation.
 *
 * @param {Character} character - The character
 * @param {string|number} id - The id of this character
 * @returns {Entity} The entity for this character
 */
export function characterToEntity(character, id) {
    return { type: 'character', item: character, id: id };
}
+
/**
 * Wraps a group in its display-entity representation.
 *
 * @param {Group} group - The group
 * @returns {Entity} The entity for this group
 */
export function groupToEntity(group) {
    const { id } = group;
    return { type: 'group', item: group, id };
}
+
/**
 * Wraps a tag in its display-entity representation.
 *
 * The tag is deep-cloned so later mutation of the entity (e.g. attaching sub
 * entities) does not leak back into the global tag list.
 *
 * @param {import('./scripts/tags.js').Tag} tag - The tag
 * @returns {Entity} The entity for this tag
 */
export function tagToEntity(tag) {
    const clonedTag = structuredClone(tag);
    return { type: 'tag', item: clonedTag, id: tag.id, entities: [] };
}
+
/**
 * Builds the full list of all entities available
 *
 * They will be correctly marked and filtered.
 *
 * @param {object} param0 - Optional parameters
 * @param {boolean} [param0.doFilter] - Whether this entity list should already be filtered based on the global filters
 * @param {boolean} [param0.doSort] - Whether the entity list should be sorted when returned
 * @returns {Entity[]} All entities
 */
export function getEntitiesList({ doFilter = false, doSort = true } = {}) {
    // Characters, groups, and (when enabled) bogus folders built from tags
    let entities = [
        ...characters.map((item, index) => characterToEntity(item, index)),
        ...groups.map(item => groupToEntity(item)),
        ...(power_user.bogus_folders ? tags.filter(isBogusFolder).sort(compareTagsForSort).map(item => tagToEntity(item)) : []),
    ];

    // We need to do multiple filter runs in a specific order, otherwise different settings might override each other
    // and screw up tags and search filter, sub lists or similar.
    // The specific filters are written inside the "filterByTagState" method and its different parameters.
    // Generally what we do is the following:
    // 1. First swipe over the list to remove the most obvious things
    // 2. Build sub entity lists for all folders, filtering them similarly to the second swipe
    // 3. We do the last run, where global filters are applied, and the search filters last

    // First run filters, that will hide what should never be displayed
    if (doFilter) {
        entities = filterByTagState(entities);
    }

    // Run over all entities between first and second filter to save some states
    for (const entity of entities) {
        // For folders, we remember the sub entities so they can be displayed later, even if they might be filtered
        // Those sub entities should be filtered and have the search filters applied too
        if (entity.type === 'tag') {
            // First pass without hiding, to know how many entries the folder has in total
            let subEntities = filterByTagState(entities, { subForEntity: entity, filterHidden: false });
            const subCount = subEntities.length;
            subEntities = filterByTagState(entities, { subForEntity: entity });
            if (doFilter) {
                // sub entities filter "hacked" because folder filter should not be applied there, so even in "only folders" mode characters show up
                subEntities = entitiesFilter.applyFilters(subEntities, { clearScoreCache: false, tempOverrides: { [FILTER_TYPES.FOLDER]: FILTER_STATES.UNDEFINED }, clearFuzzySearchCaches: false });
            }
            if (doSort) {
                sortEntitiesList(subEntities, false);
            }
            entity.entities = subEntities;
            // Number of entries this folder hides from view
            entity.hidden = subCount - subEntities.length;
        }
    }

    // Second run filters, hiding whatever should be filtered later
    if (doFilter) {
        const beforeFinalEntities = filterByTagState(entities, { globalDisplayFilters: true });
        entities = entitiesFilter.applyFilters(beforeFinalEntities, { clearFuzzySearchCaches: false });

        // Magic for folder filter. If that one is enabled, and no folders are display anymore, we remove that filter to actually show the characters.
        if (isFilterState(entitiesFilter.getFilterData(FILTER_TYPES.FOLDER), FILTER_STATES.SELECTED) && entities.filter(x => x.type == 'tag').length == 0) {
            entities = entitiesFilter.applyFilters(beforeFinalEntities, { tempOverrides: { [FILTER_TYPES.FOLDER]: FILTER_STATES.UNDEFINED }, clearFuzzySearchCaches: false });
        }
    }

    // Final step, updating some properties after the last filter run
    // A folder containing every visible non-tag entity adds no value; mark it useless (displayed greyed out)
    const nonTagEntitiesCount = entities.filter(entity => entity.type !== 'tag').length;
    for (const entity of entities) {
        if (entity.type === 'tag') {
            if (entity.entities?.length == nonTagEntitiesCount) entity.isUseless = true;
        }
    }

    // Sort before returning if requested
    if (doSort) {
        sortEntitiesList(entities, false);
    }
    entitiesFilter.clearFuzzySearchCaches();
    return entities;
}
+
/**
 * Re-fetches a single character from the server and replaces its entry in the
 * in-memory `characters` array. Shows an error toast if the character is not
 * present in the list.
 * @param {string} avatarUrl - The avatar file name identifying the character
 */
export async function getOneCharacter(avatarUrl) {
    const response = await fetch('/api/characters/get', {
        method: 'POST',
        headers: getRequestHeaders(),
        body: JSON.stringify({ avatar_url: avatarUrl }),
    });

    if (!response.ok) {
        return;
    }

    const freshData = await response.json();
    freshData['name'] = DOMPurify.sanitize(freshData['name']);
    freshData['chat'] = String(freshData['chat']);

    const existingIndex = characters.findIndex(x => x.avatar === avatarUrl);
    if (existingIndex === -1) {
        toastr.error(t`Character ${avatarUrl} not found in the list`, t`Error`, { timeOut: 5000, preventDuplicates: true });
        return;
    }

    characters[existingIndex] = freshData;
}
+
/**
 * Resolves the external source URL of a character, checking the known
 * card-extension fields in priority order (Chub, Pygmalion, GitHub, generic
 * source URL, RisuAI realm).
 * @param {string|number} [chId=this_chid] - Index into the characters array
 * @returns {string} The source URL, or an empty string if none is known
 */
function getCharacterSource(chId = this_chid) {
    const character = characters[chId];

    if (!character) {
        return '';
    }

    const extensions = character.data?.extensions;

    if (extensions?.chub?.full_path) {
        return `https://chub.ai/characters/${extensions.chub.full_path}`;
    }

    if (extensions?.pygmalion_id) {
        return `https://pygmalion.chat/${extensions.pygmalion_id}`;
    }

    if (extensions?.github_repo) {
        return `https://github.com/${extensions.github_repo}`;
    }

    if (extensions?.source_url) {
        return extensions.source_url;
    }

    // RisuAI stores its source as an array of "scheme:id" strings
    const risuSource = extensions?.risuai?.source;
    if (Array.isArray(risuSource) && risuSource.length && typeof risuSource[0] === 'string' && risuSource[0].startsWith('risurealm:')) {
        const realmId = risuSource[0].split(':')[1];
        return `https://realm.risuai.net/character/${realmId}`;
    }

    return '';
}
+
/**
 * Fetches the full character list from the server, rebuilds the in-memory
 * `characters` array, then reloads groups and reprints the character list.
 */
export async function getCharacters() {
    const response = await fetch('/api/characters/all', {
        method: 'POST',
        headers: getRequestHeaders(),
        // NOTE(review): placeholder body — presumably the endpoint requires a JSON object; confirm
        body: JSON.stringify({
            '': '',
        }),
    });
    if (response.ok === true) {
        // Replace the array contents in place so other modules keep their reference
        characters.splice(0, characters.length);
        const getData = await response.json();
        for (let i = 0; i < getData.length; i++) {
            characters[i] = getData[i];
            characters[i]['name'] = DOMPurify.sanitize(characters[i]['name']);

            // For dropped-in cards
            if (!characters[i]['chat']) {
                characters[i]['chat'] = `${characters[i]['name']} - ${humanizedDateTime()}`;
            }

            characters[i]['chat'] = String(characters[i]['chat']);
        }
        // Keep the avatar field of the currently open character in sync
        if (this_chid !== undefined) {
            $('#avatar_url_pole').val(characters[this_chid].avatar);
        }

        await getGroups();
        await printCharacters(true);
    }
}
+
/**
 * Deletes a chat file of the current character on the server, switching to
 * another chat first if the deleted one was active.
 * @param {string} chatfile - The chat file name (with .jsonl extension)
 */
async function delChat(chatfile) {
    const response = await fetch('/api/chats/delete', {
        method: 'POST',
        headers: getRequestHeaders(),
        body: JSON.stringify({
            chatfile: chatfile,
            avatar_url: characters[this_chid].avatar,
        }),
    });

    if (!response.ok) {
        return;
    }

    const deletedChatName = chatfile.replace('.jsonl', '');
    // If the active chat was the one deleted, pick another before announcing it
    if (deletedChatName === characters[this_chid].chat) {
        chat_metadata = {};
        await replaceCurrentChat();
    }
    await eventSource.emit(event_types.CHAT_DELETED, deletedChatName);
}
+
/**
 * Replaces the current chat of the active character: switches to the most
 * recent existing chat, or starts a new one if the character has no chats.
 */
export async function replaceCurrentChat() {
    await clearChat();
    chat.length = 0;

    const chatsResponse = await fetch('/api/characters/chats', {
        method: 'POST',
        headers: getRequestHeaders(),
        body: JSON.stringify({ avatar_url: characters[this_chid].avatar }),
    });

    if (!chatsResponse.ok) {
        return;
    }

    const chats = Object.values(await chatsResponse.json());
    // Most recent chat first
    chats.sort((a, b) => sortMoments(timestampToMoment(a.last_mes), timestampToMoment(b.last_mes)));

    // Pick the newest existing chat, or start a new one if none exist.
    // (The two original branches were identical except for the chat name.)
    characters[this_chid].chat = (chats.length && typeof chats[0] === 'object')
        ? chats[0].file_name.replace('.jsonl', '')
        : `${name2} - ${humanizedDateTime()}`;

    $('#selected_chat_pole').val(characters[this_chid].chat);
    saveCharacterDebounced();
    await getChat();
}
+
/**
 * Prepends older messages to the chat view, keeping the scroll anchored.
 * @param {number?} [messagesToLoad=null] - How many messages to load; falls back to the chat truncation setting
 */
export function showMoreMessages(messagesToLoad = null) {
    const firstDisplayedMesId = $('#chat').children('.mes').first().attr('mesid');
    let messageId = Number(firstDisplayedMesId);
    let count = messagesToLoad || power_user.chat_truncation || Number.MAX_SAFE_INTEGER;

    // If there are no messages displayed, or the message somehow has no mesid, we default to one higher than last message id,
    // so the first "new" message being shown will be the last available message
    if (isNaN(messageId)) {
        messageId = getLastMessageId() + 1;
    }

    console.debug('Inserting messages before', messageId, 'count', count, 'chat length', chat.length);
    // Snapshot scroll height and button visibility before inserting content
    const prevHeight = $('#chat').prop('scrollHeight');
    const isButtonInView = isElementInViewport($('#show_more_messages')[0]);

    // Walk backwards from the first shown message, inserting one older message per step
    while (messageId > 0 && count > 0) {
        let newMessageId = messageId - 1;
        addOneMessage(chat[newMessageId], { insertBefore: messageId >= chat.length ? null : messageId, scroll: false, forceId: newMessageId });
        count--;
        messageId--;
    }

    // All messages are shown now; the "show more" button is no longer needed
    if (messageId == 0) {
        $('#show_more_messages').remove();
    }

    // Keep the viewport anchored on the previously visible content
    if (isButtonInView) {
        const newHeight = $('#chat').prop('scrollHeight');
        $('#chat').scrollTop(newHeight - prevHeight);
    }
}
+
/**
 * Renders the current chat into the chat container, truncated to the
 * configured number of messages, then scrolls to the bottom once all
 * embedded images have loaded.
 */
export async function printMessages() {
    let startIndex = 0;
    let count = power_user.chat_truncation || Number.MAX_SAFE_INTEGER;

    if (chat.length > count) {
        startIndex = chat.length - count;
        // FIX: this markup was garbled in the source; reconstructed as the
        // "show more" banner that showMoreMessages() locates by its id.
        $('#chat').append('<div id="show_more_messages">Show more messages</div>');
    }

    for (let i = startIndex; i < chat.length; i++) {
        const item = chat[i];
        addOneMessage(item, { scroll: false, forceId: i, showSwipes: false });
    }

    // Scroll to bottom when all images are loaded
    const images = document.querySelectorAll('#chat .mes img');
    let imagesLoaded = 0;

    for (let i = 0; i < images.length; i++) {
        const image = images[i];
        if (image instanceof HTMLImageElement) {
            if (image.complete) {
                incrementAndCheck();
            } else {
                image.addEventListener('load', incrementAndCheck);
            }
        }
    }

    // Mark only the final message as the last one, then refresh swipe controls
    $('#chat .mes').removeClass('last_mes');
    $('#chat .mes').last().addClass('last_mes');
    hideSwipeButtons();
    showSwipeButtons();
    scrollChatToBottom();

    // Re-scrolls once every image in the chat has finished loading
    function incrementAndCheck() {
        imagesLoaded++;
        if (imagesLoaded === images.length) {
            scrollChatToBottom();
        }
    }
}
+
/**
 * Empties the chat view and associated UI state, saving itemized prompts for
 * the current chat before clearing them.
 */
export async function clearChat() {
    closeMessageEditor();
    extension_prompts = {};
    // Leave delete mode cleanly if it was active
    if (is_delete_mode) {
        $('#dialogue_del_mes_cancel').trigger('click');
    }
    $('#chat').children().remove();

    const zoomedAvatars = $('.zoomed_avatar[forChar]');
    if (zoomedAvatars.length) {
        console.debug('saw avatars to remove');
        zoomedAvatars.remove();
    } else { console.debug('saw no avatars'); }

    await saveItemizedPrompts(getCurrentChatId());
    itemizedPrompts = [];
}
+
/**
 * Removes the last message from the chat array and the DOM, then emits the
 * MESSAGE_DELETED event with the new chat length.
 */
export async function deleteLastMessage() {
    // Guard: setting array length to -1 on an empty chat would throw a RangeError
    if (chat.length === 0) {
        return;
    }
    chat.length = chat.length - 1;
    $('#chat').children('.mes').last().remove();
    await eventSource.emit(event_types.MESSAGE_DELETED, chat.length);
}
+
/**
 * Clears and reloads whichever chat is currently open (group chat, character
 * chat, or the neutral/assistant chat when nothing is selected).
 */
export async function reloadCurrentChat() {
    preserveNeutralChat();
    await clearChat();
    chat.length = 0;

    if (selected_group) {
        await getGroupChat(selected_group, true);
    }
    else if (this_chid !== undefined) {
        await getChat();
    }
    else {
        // No group and no character selected: reset to the neutral chat state
        resetChatState();
        restoreNeutralChat();
        await getCharacters();
        await printMessages();
        await eventSource.emit(event_types.CHAT_CHANGED, getCurrentChatId());
    }

    hideSwipeButtons();
    showSwipeButtons();
}
+
/**
 * Send the message currently typed into the chat box.
 */
export async function sendTextareaMessage() {
    // Ignore while a generation is in flight, slash commands are executing,
    // or a message is being edited
    if (is_send_press) return;
    if (isExecutingCommandsFromChatInput) return;
    if (this_edit_mes_id) return; // don't proceed if editing a message

    let generateType;
    // "Continue on send" is activated when the user hits "send" (or presses enter) on an empty chat box, and the last
    // message was sent from a character (not the user or the system).
    const textareaText = String($('#send_textarea').val());
    if (power_user.continue_on_send &&
        !hasPendingFileAttachment() &&
        !textareaText &&
        !selected_group &&
        chat.length &&
        !chat[chat.length - 1]['is_user'] &&
        !chat[chat.length - 1]['is_system']
    ) {
        generateType = 'continue';
    }

    // Typing into the neutral (no character) chat spins up an assistant chat first
    if (textareaText && !selected_group && this_chid === undefined && name2 !== neutralCharacterName) {
        await newAssistantChat();
    }

    // Intentionally not awaited: generation runs in the background
    Generate(generateType);
}
+
/**
 * Formats the message text into an HTML string using Markdown and other formatting.
 * @param {string} mes Message text
 * @param {string} ch_name Character name
 * @param {boolean} isSystem If the message was sent by the system
 * @param {boolean} isUser If the message was sent by the user
 * @param {number} messageId Message index in chat array
 * @param {object} [sanitizerOverrides] DOMPurify sanitizer option overrides
 * @returns {string} HTML string
 */
export function messageFormatting(mes, ch_name, isSystem, isUser, messageId, sanitizerOverrides = {}) {
    if (!mes) {
        return '';
    }

    // Substitute macros in the first (greeting) message and persist the result
    // back to the chat entry if it was not otherwise modified meanwhile.
    if (Number(messageId) === 0 && !isSystem && !isUser) {
        const mesBeforeReplace = mes;
        const chatMessage = chat[messageId];
        mes = substituteParams(mes, undefined, ch_name);
        if (chatMessage && chatMessage.mes === mesBeforeReplace && chatMessage.extra?.display_text !== mesBeforeReplace) {
            chatMessage.mes = mes;
        }
    }

    mesForShowdownParse = mes;

    // Force isSystem = false on comment messages so they get formatted properly
    if (ch_name === COMMENT_NAME_DEFAULT && isSystem && !isUser) {
        isSystem = false;
    }

    // Let hidden messages have markdown
    if (isSystem && ch_name !== systemUserName) {
        isSystem = false;
    }

    // Prompt bias replacement should be applied on the raw message
    if (!power_user.show_user_prompt_bias && ch_name && !isUser && !isSystem) {
        mes = mes.replaceAll(substituteParams(power_user.user_prompt_bias), '');
    }

    if (!isSystem) {
        // Picks the regex placement category matching the message author
        function getRegexPlacement() {
            try {
                if (isUser) {
                    return regex_placement.USER_INPUT;
                } else if (chat[messageId]?.extra?.type === 'narrator') {
                    return regex_placement.SLASH_COMMAND;
                } else {
                    return regex_placement.AI_OUTPUT;
                }
            } catch {
                return regex_placement.AI_OUTPUT;
            }
        }

        const regexPlacement = getRegexPlacement();
        // Depth = how many non-system messages come after this one
        const usableMessages = chat.map((x, index) => ({ message: x, index: index })).filter(x => !x.message.is_system);
        const indexOf = usableMessages.findIndex(x => x.index === Number(messageId));
        const depth = messageId >= 0 && indexOf !== -1 ? (usableMessages.length - indexOf - 1) : undefined;

        // Always override the character name
        mes = getRegexedString(mes, regexPlacement, {
            characterOverride: ch_name,
            isMarkdown: true,
            depth: depth,
        });
    }

    if (power_user.auto_fix_generated_markdown) {
        mes = fixMarkdown(mes, true);
    }

    // FIX: this line was garbled into a no-op ('<' -> '<'); encode_tags must
    // HTML-escape angle brackets so raw tags render as text.
    if (!isSystem && power_user.encode_tags) {
        mes = mes.replaceAll('<', '&lt;').replaceAll('>', '&gt;');
    }

    if (!isSystem) {
        // Save double quotes in tags as a special character to prevent them from being encoded
        if (!power_user.encode_tags) {
            mes = mes.replace(/<([^>]+)>/g, function (_, contents) {
                return '<' + contents.replace(/"/g, '\ufffe') + '>';
            });
        }

        // Wrap quoted speech in <q> elements, skipping code spans/fences.
        // FIX: the <q> wrappers were stripped from the source, leaving every branch a no-op.
        mes = mes.replace(
            /```[\s\S]*?```|``[\s\S]*?``|`[\s\S]*?`|(".*?")|(\u201C.*?\u201D)|(\u00AB.*?\u00BB)|(\u300C.*?\u300D)|(\u300E.*?\u300F)|(\uFF02.*?\uFF02)/gm,
            function (match, p1, p2, p3, p4, p5, p6) {
                if (p1) {
                    // English double quotes
                    return `<q>"${p1.slice(1, -1)}"</q>`;
                } else if (p2) {
                    // Curly double quotes “ ”
                    return `<q>“${p2.slice(1, -1)}”</q>`;
                } else if (p3) {
                    // Guillemets « »
                    return `<q>«${p3.slice(1, -1)}»</q>`;
                } else if (p4) {
                    // Corner brackets 「 」
                    return `<q>「${p4.slice(1, -1)}」</q>`;
                } else if (p5) {
                    // White corner brackets 『 』
                    return `<q>『${p5.slice(1, -1)}』</q>`;
                } else if (p6) {
                    // Fullwidth quotes ＂ ＂
                    return `<q>＂${p6.slice(1, -1)}＂</q>`;
                } else {
                    // Return the original match if no quotes are found
                    return match;
                }
            },
        );

        // Restore double quotes in tags
        if (!power_user.encode_tags) {
            mes = mes.replace(/\ufffe/g, '"');
        }

        mes = mes.replaceAll('\\begin{align*}', '$$');
        mes = mes.replaceAll('\\end{align*}', '$$');
        mes = converter.makeHtml(mes);

        // FIX: the <code> tag patterns were stripped; protect newlines inside
        // code blocks, convert the remaining newlines to <br/>, then restore.
        mes = mes.replace(/<code(.*)>[\s\S]*?<\/code>/g, function (match) {
            // Firefox creates extra newlines from <br>s in code blocks, so we replace them before converting newlines to <br>s.
            return match.replace(/\n/gm, '\u0000');
        });
        mes = mes.replace(/\n/g, '<br/>');
        mes = mes.replace(/\u0000/g, '\n'); // Restore converted newlines
        mes = mes.trim();

        // FIX: restore the &amp; -> & unescape inside code blocks (was garbled to a no-op)
        mes = mes.replace(/<code(.*)>[\s\S]*?<\/code>/g, function (match) {
            return match.replace(/&amp;/g, '&');
        });
    }

    // Strip a leading "CharacterName:" speaker prefix unless the user wants it shown
    if (!power_user.allow_name2_display && ch_name && !isUser && !isSystem) {
        mes = mes.replace(new RegExp(`(^|\n)${escapeRegex(ch_name)}:`, 'g'), '$1');
    }

    /** @type {import('dompurify').Config & { RETURN_DOM_FRAGMENT: false; RETURN_DOM: false }} */
    const config = {
        RETURN_DOM: false,
        RETURN_DOM_FRAGMENT: false,
        RETURN_TRUSTED_TYPE: false,
        MESSAGE_SANITIZE: true,
        ADD_TAGS: ['custom-style'],
        ...sanitizerOverrides,
    };
    // Style tags are encoded before sanitizing and decoded after, so custom
    // styles survive DOMPurify
    mes = encodeStyleTags(mes);
    mes = DOMPurify.sanitize(mes, config);
    mes = decodeStyleTags(mes);

    return mes;
}
+
/**
 * Inserts or replaces an SVG icon adjacent to the provided message's timestamp.
 *
 * If the `extra.api` is "openai" and `extra.model` contains the substring "claude",
 * the function fetches the "claude.svg". Otherwise, it fetches the SVG named after
 * the value in `extra.api`.
 *
 * @param {JQuery} mes - The message element containing the timestamp where the icon should be inserted or replaced.
 * @param {Object} extra - Contains the API and model details.
 * @param {string} extra.api - The name of the API, used to determine which SVG to fetch.
 * @param {string} extra.model - The model name, used to check for the substring "claude".
 */
function insertSVGIcon(mes, extra) {
    const lowerModel = extra.model?.toLowerCase();

    // Determine the SVG filename
    let modelName;
    if (extra.api === 'openai' && lowerModel?.includes('claude')) {
        // Claude on OpenRouter or Anthropic
        modelName = 'claude';
    } else if (extra.api === 'openai' && lowerModel?.includes('openai')) {
        // OpenAI on OpenRouter
        modelName = 'openai';
    } else if (extra.api === 'openai' && (extra.model === null || lowerModel?.includes('/'))) {
        // OpenRouter website model or other models
        modelName = 'openrouter';
    } else {
        // Everything else
        modelName = extra.api;
    }

    const image = new Image();
    // Add classes for styling and identification
    image.classList.add('icon-svg', 'timestamp-icon');
    image.src = `/img/${modelName}.svg`;
    image.title = `${extra?.api ? extra.api + ' - ' : ''}${extra?.model ?? ''}`;

    image.onload = async function () {
        // Check if an SVG already exists adjacent to the timestamp
        const existingSVG = mes.find('.timestamp').next('.timestamp-icon');

        if (existingSVG.length) {
            // Replace existing SVG
            existingSVG.replaceWith(image);
        } else {
            // Append the new SVG if none exists
            mes.find('.timestamp').after(image);
        }

        await SVGInject(image);
    };
}
+
+
+function getMessageFromTemplate({
+ mesId,
+ swipeId,
+ characterName,
+ isUser,
+ avatarImg,
+ bias,
+ isSystem,
+ title,
+ timerValue,
+ timerTitle,
+ bookmarkLink,
+ forceAvatar,
+ timestamp,
+ tokenCount,
+ extra,
+}) {
+ const mes = messageTemplate.clone();
+ mes.attr({
+ 'mesid': mesId,
+ 'swipeid': swipeId,
+ 'ch_name': characterName,
+ 'is_user': isUser,
+ 'is_system': !!isSystem,
+ 'bookmark_link': bookmarkLink,
+ 'force_avatar': !!forceAvatar,
+ 'timestamp': timestamp,
+ });
+ mes.find('.avatar img').attr('src', avatarImg);
+ mes.find('.ch_name .name_text').text(characterName);
+ mes.find('.mes_bias').html(bias);
+ mes.find('.timestamp').text(timestamp).attr('title', `${extra?.api ? extra.api + ' - ' : ''}${extra?.model ?? ''}`);
+ mes.find('.mesIDDisplay').text(`#${mesId}`);
+ tokenCount && mes.find('.tokenCounterDisplay').text(`${tokenCount}t`);
+ title && mes.attr('title', title);
+ timerValue && mes.find('.mes_timer').attr('title', timerTitle).text(timerValue);
+ bookmarkLink && updateBookmarkDisplay(mes);
+
+ if (power_user.timestamp_model_icon && extra?.api) {
+ insertSVGIcon(mes, extra);
+ }
+
+ return mes;
+}
+
/**
 * Re-renders the text and media of an existing message element in the chat.
 * @param {number|string} messageId - The mesid of the message element
 * @param {object} message - The message object to render
 */
export function updateMessageBlock(messageId, message) {
    const messageElement = $(`#chat [mesid="${messageId}"]`);
    // Prefer the override display text when present
    const displayText = message?.extra?.display_text ?? message.mes;
    const formattedText = messageFormatting(displayText, message.name, message.is_system, message.is_user, messageId);
    messageElement.find('.mes_text').html(formattedText);
    addCopyToCodeBlocks(messageElement);
    appendMediaToMessage(message, messageElement);
}
+
/**
 * Appends image or file to the message element.
 * @param {object} mes Message object
 * @param {JQuery} messageElement Message element
 * @param {boolean} [adjustScroll=true] Whether to adjust the scroll position after appending the media
 */
export function appendMediaToMessage(mes, messageElement, adjustScroll = true) {
    // Add image to message
    if (mes.extra?.image) {
        const container = messageElement.find('.mes_img_container');
        const chatHeight = $('#chat').prop('scrollHeight');
        const image = messageElement.find('.mes_img');
        const text = messageElement.find('.mes_text');
        const isInline = !!mes.extra?.inline_image;
        // Once the image loads, compensate the scroll position for the height it added
        image.off('load').on('load', function () {
            if (!adjustScroll) {
                return;
            }
            const scrollPosition = $('#chat').scrollTop();
            const newChatHeight = $('#chat').prop('scrollHeight');
            const diff = newChatHeight - chatHeight;
            $('#chat').scrollTop(scrollPosition + diff);
        });
        image.attr('src', mes.extra?.image);
        image.attr('title', mes.extra?.title || mes.title || '');
        container.addClass('img_extra');
        // Inline images show alongside the text; standalone images replace it
        image.toggleClass('img_inline', isInline);
        text.toggleClass('displayNone', !isInline);

        // Wire up the swipe controls when the message carries multiple images
        const imageSwipes = mes.extra.image_swipes;
        if (Array.isArray(imageSwipes) && imageSwipes.length > 0) {
            container.addClass('img_swipes');
            const counter = container.find('.mes_img_swipe_counter');
            const currentImage = imageSwipes.indexOf(mes.extra.image) + 1;
            counter.text(`${currentImage}/${imageSwipes.length}`);

            const swipeLeft = container.find('.mes_img_swipe_left');
            swipeLeft.off('click').on('click', function () {
                eventSource.emit(event_types.IMAGE_SWIPED, { message: mes, element: messageElement, direction: 'left' });
            });

            const swipeRight = container.find('.mes_img_swipe_right');
            swipeRight.off('click').on('click', function () {
                eventSource.emit(event_types.IMAGE_SWIPED, { message: mes, element: messageElement, direction: 'right' });
            });
        }
    }

    // Add file to message
    if (mes.extra?.file) {
        // Replace any previous attachment block with a freshly populated one
        messageElement.find('.mes_file_container').remove();
        const messageId = messageElement.attr('mesid');
        const template = $('#message_file_template .mes_file_container').clone();
        template.find('.mes_file_name').text(mes.extra.file.name);
        template.find('.mes_file_size').text(humanFileSize(mes.extra.file.size));
        template.find('.mes_file_download').attr('mesid', messageId);
        template.find('.mes_file_delete').attr('mesid', messageId);
        messageElement.find('.mes_block').append(template);
    } else {
        messageElement.find('.mes_file_container').remove();
    }
}
+
/**
 * Appends an image to the message element.
 * @deprecated Use appendMediaToMessage instead.
 * @param {object} mes Message object
 * @param {JQuery} messageElement Message element
 */
export function appendImageToMessage(mes, messageElement) {
    appendMediaToMessage(mes, messageElement);
}
+
/**
 * Applies syntax highlighting to all code blocks inside a message element and
 * attaches a copy-to-clipboard button to each.
 * @param {JQuery|HTMLElement} messageElement - The message element to process
 */
export function addCopyToCodeBlocks(messageElement) {
    const codeBlocks = $(messageElement).find('pre code');
    codeBlocks.each(function () {
        const block = this;
        hljs.highlightElement(block);

        const copyButton = document.createElement('i');
        copyButton.classList.add('fa-solid', 'fa-copy', 'code-copy', 'interactable');
        copyButton.title = 'Copy code';
        block.appendChild(copyButton);

        copyButton.addEventListener('pointerup', async function () {
            const text = block.innerText;
            await copyText(text);
            toastr.info(t`Copied!`, '', { timeOut: 2000 });
        });
    });
}
+
+
/**
 * Adds a single message to the chat.
 * Callers are expected to push the message object into `chat` BEFORE calling this.
 * @param {object} mes Message object
 * @param {object} [options] Options
 * @param {string} [options.type='normal'] Message type ('normal' or 'swipe')
 * @param {number} [options.insertAfter=null] Message ID to insert the new message after
 * @param {boolean} [options.scroll=true] Whether to scroll to the new message
 * @param {number} [options.insertBefore=null] Message ID to insert the new message before
 * @param {number} [options.forceId=null] Force the message ID
 * @param {boolean} [options.showSwipes=true] Whether to show swipe buttons
 * @returns {void}
 */
export function addOneMessage(mes, { type = 'normal', insertAfter = null, scroll = true, insertBefore = null, forceId = null, showSwipes = true } = {}) {
    let messageText = mes['mes'];
    const momentDate = timestampToMoment(mes.send_date);
    const timestamp = momentDate.isValid() ? momentDate.format('LL LT') : '';

    // Extensions may supply an alternate display text (e.g. translations); prefer it for rendering
    if (mes?.extra?.display_text) {
        messageText = mes.extra.display_text;
    }

    // Forbidden black magic
    // This allows to use "continue" on user messages
    if (type === 'swipe' && mes.swipe_id === undefined) {
        mes.swipe_id = 0;
        mes.swipes = [mes.mes];
    }

    let avatarImg = getUserAvatar(user_avatar);
    const isSystem = mes.is_system;
    const title = mes.title;
    generatedPromptCache = '';

    //for non-user messages
    if (!mes['is_user']) {
        if (mes.force_avatar) {
            avatarImg = mes.force_avatar;
        } else if (this_chid === undefined) {
            // No character selected: fall back to the system avatar
            avatarImg = system_avatar;
        } else {
            if (characters[this_chid].avatar !== 'none') {
                avatarImg = getThumbnailUrl('avatar', characters[this_chid].avatar);
            } else {
                avatarImg = default_avatar;
            }
        }
        //old processing:
        //if message is from system, use the name provided in the message JSONL to proceed,
        //if not system message, use name2 (char's name) to proceed
        //characterName = mes.is_system || mes.force_avatar ? mes.name : name2;
    } else if (mes['is_user'] && mes['force_avatar']) {
        // Special case for persona images.
        avatarImg = mes['force_avatar'];
    }

    // if mes.uses_system_ui is true, set an override on the sanitizer options
    const sanitizerOverrides = mes.uses_system_ui ? { MESSAGE_ALLOW_SYSTEM_UI: true } : {};

    messageText = messageFormatting(
        messageText,
        mes.name,
        isSystem,
        mes.is_user,
        chat.indexOf(mes),
        sanitizerOverrides,
    );
    const bias = messageFormatting(mes.extra?.bias ?? '', '', false, false, -1);
    let bookmarkLink = mes?.extra?.bookmark_link ?? '';

    // Parameters handed to the message Handlebars template
    let params = {
        mesId: forceId ?? chat.length - 1,
        swipeId: mes.swipe_id ?? 0,
        characterName: mes.name,
        isUser: mes.is_user,
        avatarImg: avatarImg,
        bias: bias,
        isSystem: isSystem,
        title: title,
        bookmarkLink: bookmarkLink,
        forceAvatar: mes.force_avatar,
        timestamp: timestamp,
        extra: mes.extra,
        tokenCount: mes.extra?.token_count ?? 0,
        ...formatGenerationTimer(mes.gen_started, mes.gen_finished, mes.extra?.token_count),
    };

    const renderedMessage = getMessageFromTemplate(params);

    // Swipes reuse the existing DOM node; everything else gets inserted
    if (type !== 'swipe') {
        if (!insertAfter && !insertBefore) {
            chatElement.append(renderedMessage);
        }
        else if (insertAfter) {
            const target = chatElement.find(`.mes[mesid="${insertAfter}"]`);
            $(renderedMessage).insertAfter(target);
        } else {
            const target = chatElement.find(`.mes[mesid="${insertBefore}"]`);
            $(renderedMessage).insertBefore(target);
        }
    }

    // Callers push the new message to chat before calling addOneMessage
    const newMessageId = typeof forceId == 'number' ? forceId : chat.length - 1;

    const newMessage = $(`#chat [mesid="${newMessageId}"]`);
    const isSmallSys = mes?.extra?.isSmallSys;

    if (isSmallSys === true) {
        newMessage.addClass('smallSysMes');
    }

    if (Array.isArray(mes?.extra?.tool_invocations)) {
        newMessage.addClass('toolCall');
    }

    //shows or hides the Prompt display button
    let mesIdToFind = type === 'swipe' ? params.mesId - 1 : params.mesId; //Number(newMessage.attr('mesId'));

    //if we have itemized messages, and the array isn't null..
    if (params.isUser === false && Array.isArray(itemizedPrompts) && itemizedPrompts.length > 0) {
        const itemizedPrompt = itemizedPrompts.find(x => Number(x.mesId) === Number(mesIdToFind));
        if (itemizedPrompt) {
            newMessage.find('.mes_prompt').show();
        }
    }

    // Hide broken avatar images instead of showing the browser's placeholder icon
    newMessage.find('.avatar img').on('error', function () {
        $(this).hide();
        $(this).parent().html('');
    });

    if (type === 'swipe') {
        // Update the last message in place rather than appending a new element
        const swipeMessage = chatElement.find(`[mesid="${chat.length - 1}"]`);
        swipeMessage.attr('swipeid', params.swipeId);
        swipeMessage.find('.mes_text').html(messageText).attr('title', title);
        swipeMessage.find('.timestamp').text(timestamp).attr('title', `${params.extra.api} - ${params.extra.model}`);
        appendMediaToMessage(mes, swipeMessage);
        if (power_user.timestamp_model_icon && params.extra?.api) {
            insertSVGIcon(swipeMessage, params.extra);
        }

        // Timer and token counter are only meaningful on the latest swipe
        if (mes.swipe_id == mes.swipes.length - 1) {
            swipeMessage.find('.mes_timer').text(params.timerValue).attr('title', params.timerTitle);
            swipeMessage.find('.tokenCounterDisplay').text(`${params.tokenCount}t`);
        } else {
            swipeMessage.find('.mes_timer').empty();
            swipeMessage.find('.tokenCounterDisplay').empty();
        }
    } else {
        const messageId = forceId ?? chat.length - 1;
        chatElement.find(`[mesid="${messageId}"] .mes_text`).append(messageText);
        appendMediaToMessage(mes, newMessage);
        showSwipes && hideSwipeButtons();
    }

    addCopyToCodeBlocks(newMessage);

    // Set the swipes counter for past messages, only visible if 'Show Swipes on All Message' is enabled
    if (!params.isUser && newMessageId !== 0 && newMessageId !== chat.length - 1) {
        const swipesNum = chat[newMessageId].swipes?.length;
        const swipeId = chat[newMessageId].swipe_id + 1;
        newMessage.find('.swipes-counter').text(formatSwipeCounter(swipeId, swipesNum));
    }

    if (showSwipes) {
        // Keep the 'last_mes' marker on exactly one (the newest) message
        $('#chat .mes').last().addClass('last_mes');
        $('#chat .mes').eq(-2).removeClass('last_mes');
        hideSwipeButtons();
        showSwipeButtons();
    }

    // Don't scroll if not inserting last
    if (!insertAfter && !insertBefore && scroll) {
        scrollChatToBottom();
    }
}
+
/**
 * Resolves the avatar URL for a character by its Id.
 * Falls back to the default avatar when the character is missing or has no avatar set.
 * @param {number} characterId Character Id
 * @returns {string} Avatar URL
 */
export function getCharacterAvatar(characterId) {
    const avatar = characters[characterId]?.avatar;
    return (avatar && avatar !== 'none') ? formatCharacterAvatar(avatar) : default_avatar;
}
+
/**
 * Builds the relative URL of a character avatar image file.
 * @param {string} characterAvatar Avatar file name
 * @returns {string} Relative URL under the characters directory
 */
export function formatCharacterAvatar(characterAvatar) {
    return ['characters', characterAvatar].join('/');
}
+
/**
 * Formats the title for the generation timer.
 * @param {Date} gen_started Date when generation was started
 * @param {Date} gen_finished Date when generation was finished
 * @param {number} tokenCount Number of tokens generated (0 if not available)
 * @returns {Object} Object containing the formatted timer value and title
 * @example
 * const { timerValue, timerTitle } = formatGenerationTimer(gen_started, gen_finished, tokenCount);
 * console.log(timerValue); // 1.2s
 * console.log(timerTitle); // Generation queued: 12:34:56 7 Jan 2021\nReply received: 12:34:57 7 Jan 2021\nTime to generate: 1.2 seconds\nToken rate: 5 t/s
 */
function formatGenerationTimer(gen_started, gen_finished, tokenCount) {
    // Missing either endpoint means there is nothing to show
    if (!gen_started || !gen_finished) {
        return {};
    }

    const dateFormat = 'HH:mm:ss D MMM YYYY';
    const start = moment(gen_started);
    const finish = moment(gen_finished);
    const seconds = finish.diff(start, 'seconds', true);
    const timerValue = `${seconds.toFixed(1)}s`;
    const timerTitle = [
        `Generation queued: ${start.format(dateFormat)}`,
        `Reply received: ${finish.format(dateFormat)}`,
        `Time to generate: ${seconds} seconds`,
        // Guard seconds > 0: a zero-duration diff would otherwise produce "Infinity t/s"
        tokenCount > 0 && seconds > 0 ? `Token rate: ${Number(tokenCount / seconds).toFixed(1)} t/s` : '',
    ].join('\n');

    // Invalid or negative durations (bad dates, clock skew) get an empty value but keep the title
    if (isNaN(seconds) || seconds < 0) {
        return { timerValue: '', timerTitle };
    }

    return { timerValue, timerTitle };
}
+
/**
 * Scrolls the chat container to the bottom (or to the top of the last message
 * in waifu mode). No-op when auto-scroll is disabled in user settings.
 */
export function scrollChatToBottom() {
    if (!power_user.auto_scroll_chat_to_bottom) {
        return;
    }

    let target = chatElement[0].scrollHeight;

    if (power_user.waifuMode) {
        // Waifu mode: align the top of the last message with the viewport instead
        const lastMes = chatElement.find('.mes').last();
        if (lastMes.length) {
            target = chatElement.scrollTop() + lastMes.position().top;
        }
    }

    chatElement.scrollTop(target);
}
+
/**
 * Substitutes {{macro}} parameters in a string.
 * Convenience wrapper around substituteParams with character card replacement
 * enabled and defaults for all name/original/group arguments.
 * @param {string} content - The string to substitute parameters in.
 * @param {Record<string, any>} additionalMacro - Additional environment variables for substitution.
 * @param {(x: string) => string} [postProcessFn] - Post-processing function for each substituted macro.
 * @returns {string} The string with substituted parameters.
 */
export function substituteParamsExtended(content, additionalMacro = {}, postProcessFn = (x) => x) {
    return substituteParams(content, undefined, undefined, undefined, undefined, true, additionalMacro, postProcessFn);
}
+
/**
 * Substitutes {{macro}} parameters in a string.
 * Builds a macro environment object whose insertion order matters: name/group
 * macros are added last so they also get replaced inside card fields.
 * @param {string} content - The string to substitute parameters in.
 * @param {string} [_name1] - The name of the user. Uses global name1 if not provided.
 * @param {string} [_name2] - The name of the character. Uses global name2 if not provided.
 * @param {string} [_original] - The original message for {{original}} substitution.
 * @param {string} [_group] - The group members list for {{group}} substitution.
 * @param {boolean} [_replaceCharacterCard] - Whether to replace character card macros.
 * @param {Record<string, any>} [additionalMacro] - Additional environment variables for substitution.
 * @param {(x: string) => string} [postProcessFn] - Post-processing function for each substituted macro.
 * @returns {string} The string with substituted parameters.
 */
export function substituteParams(content, _name1, _name2, _original, _group, _replaceCharacterCard = true, additionalMacro = {}, postProcessFn = (x) => x) {
    if (!content) {
        return '';
    }

    const environment = {};

    if (typeof _original === 'string') {
        // {{original}} is replaced only once; later occurrences resolve to an empty string
        let originalSubstituted = false;
        environment.original = () => {
            if (originalSubstituted) {
                return '';
            }

            originalSubstituted = true;
            return _original;
        };
    }

    // Resolves {{group}} / {{groupNotMuted}}: explicit override, joined member names, or the char name
    const getGroupValue = (includeMuted) => {
        if (typeof _group === 'string') {
            return _group;
        }

        if (selected_group) {
            const members = groups.find(x => x.id === selected_group)?.members;
            /** @type {string[]} */
            const disabledMembers = groups.find(x => x.id === selected_group)?.disabled_members ?? [];
            const isMuted = x => includeMuted ? true : !disabledMembers.includes(x);
            const names = Array.isArray(members)
                ? members.filter(isMuted).map(m => characters.find(c => c.avatar === m)?.name).filter(Boolean).join(', ')
                : '';
            return names;
        } else {
            return _name2 ?? name2;
        }
    };

    if (_replaceCharacterCard) {
        const fields = getCharacterCardFields();
        environment.charPrompt = fields.system || '';
        environment.charInstruction = environment.charJailbreak = fields.jailbreak || '';
        environment.description = fields.description || '';
        environment.personality = fields.personality || '';
        environment.scenario = fields.scenario || '';
        environment.persona = fields.persona || '';
        environment.mesExamples = fields.mesExamples || '';
        environment.charVersion = fields.version || '';
        environment.char_version = fields.version || '';
    }

    // Must be substituted last so that they're replaced inside {{description}}
    environment.user = _name1 ?? name1;
    environment.char = _name2 ?? name2;
    environment.group = environment.charIfNotGroup = getGroupValue(true);
    environment.groupNotMuted = getGroupValue(false);
    environment.model = getGeneratingModel();

    // Caller-supplied macros can extend or override the defaults built above
    if (additionalMacro && typeof additionalMacro === 'object') {
        Object.assign(environment, additionalMacro);
    }

    return evaluateMacros(content, environment, postProcessFn);
}
+
+
/**
 * Gets stopping sequences for the prompt.
 * @param {boolean} isImpersonate A request is made to impersonate a user
 * @param {boolean} isContinue A request is made to continue the message
 * @returns {string[]} Array of stopping strings (unique, empty entries removed)
 */
export function getStoppingStrings(isImpersonate, isContinue) {
    const strings = [];

    if (power_user.context.names_as_stop_strings) {
        const charPrefix = `\n${name2}:`;
        const userPrefix = `\n${name1}:`;

        // When impersonating, the character's name terminates the reply; otherwise the user's.
        // The user prefix is always included (duplicates are removed at the end).
        strings.push(isImpersonate ? charPrefix : userPrefix, userPrefix);

        // Continuing a user message: the character name also ends the generation
        if (isContinue && Array.isArray(chat) && chat[chat.length - 1]?.is_user) {
            strings.push(charPrefix);
        }

        // Add group members as stopping strings if generating for a specific group member or user. (Allow slash commands to work around name stopping string restrictions)
        if (selected_group && (name2 || isImpersonate)) {
            const group = groups.find(x => x.id === selected_group);
            const members = Array.isArray(group?.members) ? group.members : [];
            for (const member of members) {
                const character = characters.find(y => y.avatar == member);
                if (character && character.name && character.name !== name2) {
                    strings.push(`\n${character.name}:`);
                }
            }
        }
    }

    strings.push(...getInstructStoppingSequences());
    strings.push(...getCustomStoppingStrings());

    if (power_user.single_line) {
        strings.unshift('\n');
    }

    return strings.filter(x => x).filter(onlyUnique);
}
+
/**
 * Background generation based on the provided prompt.
 * @param {string} quiet_prompt Instruction prompt for the AI
 * @param {boolean} quietToLoud Whether the message should be sent in a foreground (loud) or background (quiet) mode
 * @param {boolean} skipWIAN whether to skip addition of World Info and Author's Note into the prompt
 * @param {string} quietImage Image to use for the quiet prompt
 * @param {string} quietName Name to use for the quiet prompt (defaults to "System:")
 * @param {number} [responseLength] Maximum response length. If unset, the global default value is used.
 * @param {number} force_chid Character ID to use for this generation run. Works in groups only.
 * @returns {Promise<any>} Result of the 'quiet' generation (see Generate)
 */
export async function generateQuietPrompt(quiet_prompt, quietToLoud, skipWIAN, quietImage = null, quietName = null, responseLength = null, force_chid = null) {
    // Removed leftover debug logging ("got into genQuietPrompt")
    const responseLengthCustomized = typeof responseLength === 'number' && responseLength > 0;
    let eventHook = () => { };
    try {
        /** @type {GenerateOptions} */
        const options = {
            quiet_prompt,
            quietToLoud,
            skipWIAN: skipWIAN,
            force_name2: true,
            quietImage: quietImage,
            quietName: quietName,
            force_chid: force_chid,
        };
        if (responseLengthCustomized) {
            // Temporarily override the response length for this single generation
            TempResponseLength.save(main_api, responseLength);
            eventHook = TempResponseLength.setupEventHook(main_api);
        }
        return await Generate('quiet', options);
    } finally {
        // Always restore the original response length, even when generation throws
        if (responseLengthCustomized && TempResponseLength.isCustomized()) {
            TempResponseLength.restore(main_api);
            TempResponseLength.removeEventHook(main_api, eventHook);
        }
    }
}
+
/**
 * Executes slash commands and returns the new text and whether the generation was interrupted.
 * @param {string} message Text to be sent
 * @returns {Promise<boolean>} Whether the message sending was interrupted
 */
export async function processCommands(message) {
    // Only messages whose trimmed text starts with '/' are treated as commands
    const trimmed = message?.trim();
    if (!trimmed || !trimmed.startsWith('/')) {
        return false;
    }

    await executeSlashCommandsOnChatInput(message, {
        clearChatInput: true,
    });
    return true;
}
+
/**
 * Adds a system message of the given type to the chat and renders it.
 * @param {string} type System message type (a key of system_messages)
 * @param {string} [text] Optional text overriding the template's default message
 * @param {object} [extra] Extra metadata merged into the message's `extra` field
 */
export function sendSystemMessage(type, text, extra = {}) {
    const systemMessage = system_messages[type];

    // Unknown message types are ignored
    if (!systemMessage) {
        return;
    }

    const newMessage = { ...systemMessage, send_date: getMessageTimeStamp() };

    if (text) {
        newMessage.mes = text;
    }

    if (type === system_message_types.SLASH_COMMANDS) {
        newMessage.mes = getSlashCommandsHelp();
    }

    if (!newMessage.extra) {
        newMessage.extra = {};
    }

    newMessage.extra = Object.assign(newMessage.extra, extra);
    newMessage.extra.type = type;

    chat.push(newMessage);
    addOneMessage(newMessage);
    is_send_press = false;
    if (type === system_message_types.SLASH_COMMANDS) {
        // Replace the rendered spinner placeholder with the interactive command browser
        const browser = new SlashCommandBrowser();
        const spinner = document.querySelector('#chat .last_mes .custom-slashHelp');
        // Guard against a missing placeholder so a template change can't crash message sending
        if (spinner) {
            const parent = spinner.parentElement;
            spinner.remove();
            browser.renderInto(parent);
            browser.search.focus();
        }
    }
}
+
/**
 * Extracts the contents of bias macros from a message.
 * Compiles the message with an isolated Handlebars instance whose {{bias}}
 * helper records its argument and renders nothing.
 * @param {string} message Message text
 * @returns {string} Message bias extracted from the message (or an empty string if not found)
 */
export function extractMessageBias(message) {
    if (!message) {
        return '';
    }

    try {
        const collected = [];
        const hb = Handlebars.create();
        hb.registerHelper('bias', function (text) {
            collected.push(text);
            return '';
        });
        // Rendering triggers the helper for every {{bias ...}} occurrence
        hb.compile(message)({});

        return collected.length > 0 ? ` ${collected.join(' ')}` : '';
    } catch {
        // Malformed template syntax in the message: treat as "no bias"
        return '';
    }
}
+
/**
 * Removes impersonated group member lines from the group member messages.
 * Doesn't do anything if group reply trimming is disabled.
 * @param {string} getMessage Group message
 * @returns {string} Cleaned-up group message
 */
function cleanGroupMessage(getMessage) {
    if (power_user.disable_group_trimming) {
        return getMessage;
    }

    const group = groups.find((x) => x.id == selected_group);
    const members = (group && Array.isArray(group.members)) ? group.members : [];

    for (const member of members) {
        const character = characters.find(x => x.avatar == member);
        if (!character) {
            continue;
        }

        // The current speaker's own name must not truncate their message
        if (character.name === name2) {
            continue;
        }

        // Cut the message off at the first line that starts with another member's name
        const nameMatch = getMessage.match(new RegExp(`(^|\n)${escapeRegex(character.name)}:`));
        if (nameMatch) {
            getMessage = getMessage.substring(0, nameMatch.index);
        }
    }

    return getMessage;
}
+
/**
 * Injects the persona description into the prompt according to the user's
 * configured position: merged with the Author's Note, or at a chat depth.
 */
function addPersonaDescriptionExtensionPrompt() {
    const INJECT_TAG = 'PERSONA_DESCRIPTION';
    // Always clear the previous injection first
    setExtensionPrompt(INJECT_TAG, '', extension_prompt_types.IN_PROMPT, 0);

    const description = power_user.persona_description;
    const position = power_user.persona_description_position;

    if (!description || position === persona_description_positions.NONE) {
        return;
    }

    const anPositions = [persona_description_positions.BOTTOM_AN, persona_description_positions.TOP_AN];

    if (anPositions.includes(position) && shouldWIAddPrompt) {
        // Merge the description into the existing Author's Note, above or below it
        const originalAN = extension_prompts[NOTE_MODULE_NAME].value;
        const combined = position === persona_description_positions.TOP_AN
            ? `${description}\n${originalAN}`
            : `${originalAN}\n${description}`;

        setExtensionPrompt(NOTE_MODULE_NAME, combined, chat_metadata[metadata_keys.position], chat_metadata[metadata_keys.depth], extension_settings.note.allowWIScan, chat_metadata[metadata_keys.role]);
    }

    if (position === persona_description_positions.AT_DEPTH) {
        setExtensionPrompt(INJECT_TAG, description, extension_prompt_types.IN_CHAT, power_user.persona_description_depth, true, power_user.persona_description_role);
    }
}
+
/**
 * Returns all extension prompts combined.
 * @returns {Promise<string>} Combined extension prompts joined by newlines, with macros substituted
 */
async function getAllExtensionPrompts() {
    const collected = [];

    for (const prompt of Object.values(extension_prompts)) {
        const text = prompt?.value?.trim();
        if (!text) {
            continue;
        }

        // A prompt may ship an async filter deciding whether it applies right now;
        // filters are awaited one at a time to keep their side effects ordered.
        if (typeof prompt.filter === 'function' && !await prompt.filter()) {
            continue;
        }

        collected.push(text);
    }

    return substituteParams(collected.join('\n'));
}
+
/**
 * Wrapper to fetch extension prompts by module name
 * @param {string} moduleName Module name
 * @returns {Promise<string>} Extension prompt (empty string when missing or filtered out)
 */
export async function getExtensionPromptByName(moduleName) {
    if (!moduleName) {
        return '';
    }

    const prompt = extension_prompts[moduleName];
    if (!prompt) {
        return '';
    }

    // An async filter on the prompt decides whether it currently applies
    if (typeof prompt.filter === 'function' && !await prompt.filter()) {
        return '';
    }

    return substituteParams(prompt.value);
}
+
/**
 * Returns the extension prompt for the given position, depth, and role.
 * If multiple prompts are found, they are joined with a separator.
 * @param {number} [position] Position of the prompt
 * @param {number} [depth] Depth of the prompt
 * @param {string} [separator] Separator for joining multiple prompts
 * @param {number} [role] Role of the prompt
 * @param {boolean} [wrap] Wrap start and end with a separator
 * @returns {Promise<string>} Extension prompt
 */
export async function getExtensionPrompt(position = extension_prompt_types.IN_PROMPT, depth = undefined, separator = '\n', role = undefined, wrap = true) {
    const candidates = Object.keys(extension_prompts)
        .sort()
        .map((x) => extension_prompts[x])
        .filter(x => x.position == position && x.value)
        .filter(x => depth === undefined || x.depth === undefined || x.depth === depth)
        .filter(x => role === undefined || x.role === undefined || x.role === role);

    // BUG FIX: Array#filter ignores async predicates — a pending Promise is always
    // truthy, so custom prompt filters were never actually applied. Resolve all
    // filters first, then keep only the prompts whose filter passed.
    const filterResults = await Promise.all(candidates.map(async (prompt) => {
        const hasFilter = typeof prompt.filter === 'function';
        return !hasFilter || Boolean(await prompt.filter());
    }));
    const prompts = candidates.filter((_, index) => filterResults[index]);

    let values = prompts.map(x => x.value.trim()).join(separator);
    // Optionally pad both ends with the separator so the caller can splice it in
    if (wrap && values.length && !values.startsWith(separator)) {
        values = separator + values;
    }
    if (wrap && values.length && !values.endsWith(separator)) {
        values = values + separator;
    }
    if (values.length) {
        values = substituteParams(values);
    }
    return values;
}
+
/**
 * Runs the standard chat text replacements on a value: macro substitution
 * (without character card macros), optional newline collapsing, and CR stripping.
 * @param {string} value Text to process
 * @param {string} name1 User name for macro substitution
 * @param {string} name2 Character name for macro substitution
 * @returns {string} Processed text (returned unchanged when empty or undefined)
 */
export function baseChatReplace(value, name1, name2) {
    if (value === undefined || value.length === 0) {
        return value;
    }

    let result = substituteParams(value, name1, name2, undefined, undefined, false);

    if (power_user.collapse_newlines) {
        result = collapseNewlines(result);
    }

    return result.replace(/\r/g, '');
}
+
/**
 * Returns the character card fields for the current character.
 * @returns {{system: string, mesExamples: string, description: string, personality: string, persona: string, scenario: string, jailbreak: string, version: string}}
 */
export function getCharacterCardFields() {
    const result = { system: '', mesExamples: '', description: '', personality: '', persona: '', scenario: '', jailbreak: '', version: '' };
    result.persona = baseChatReplace(power_user.persona_description?.trim(), name1, name2);

    const character = characters[this_chid];
    if (!character) {
        return result;
    }

    // A chat-bound scenario override takes precedence over the card's own scenario
    const scenarioText = chat_metadata['scenario'] || character.scenario || '';
    Object.assign(result, {
        description: baseChatReplace(character.description?.trim(), name1, name2),
        personality: baseChatReplace(character.personality?.trim(), name1, name2),
        scenario: baseChatReplace(scenarioText.trim(), name1, name2),
        mesExamples: baseChatReplace(character.mes_example?.trim(), name1, name2),
        system: power_user.prefer_character_prompt ? baseChatReplace(character.data?.system_prompt?.trim(), name1, name2) : '',
        jailbreak: power_user.prefer_character_jailbreak ? baseChatReplace(character.data?.post_history_instructions?.trim(), name1, name2) : '',
        version: character.data?.character_version ?? '',
    });

    // In a group chat, per-member card fields override the single-character ones
    if (selected_group) {
        const groupCards = getGroupCharacterCards(selected_group, Number(this_chid));
        if (groupCards) {
            result.description = groupCards.description;
            result.personality = groupCards.personality;
            result.scenario = groupCards.scenario;
            result.mesExamples = groupCards.mesExamples;
        }
    }

    return result;
}
+
/**
 * Checks whether response streaming is enabled for the currently selected API
 * and model, honoring the per-API streaming settings and known exceptions.
 * @returns {boolean} Truthy when streaming can be used
 */
export function isStreamingEnabled() {
    const noStreamSources = [chat_completion_sources.SCALE];
    switch (main_api) {
        case 'openai': {
            if (!oai_settings.stream_openai) {
                return false;
            }
            if (noStreamSources.includes(oai_settings.chat_completion_source)) {
                return false;
            }
            // OpenAI o1 models don't support streaming
            if (oai_settings.chat_completion_source == chat_completion_sources.OPENAI && oai_settings.openai_model.startsWith('o1-')) {
                return false;
            }
            // Google bison models don't support streaming
            if (oai_settings.chat_completion_source == chat_completion_sources.MAKERSUITE && oai_settings.google_model.includes('bison')) {
                return false;
            }
            return true;
        }
        case 'kobold':
            return kai_settings.streaming_kobold && kai_flags.can_use_streaming;
        case 'novel':
            return nai_settings.streaming_novel;
        case 'textgenerationwebui':
            return textgen_settings.streaming;
        default:
            return false;
    }
}
+
/** Shows the "stop generation" button in the chat controls. */
function showStopButton() {
    $('#mes_stop').css({ 'display': 'flex' });
}
+
/**
 * Hides the "stop generation" button and signals that generation ended.
 * The visibility check ensures GENERATION_ENDED is emitted only once,
 * since hideStopButton() gets called multiple times per generation.
 */
function hideStopButton() {
    // Only act when the button is actually visible, so the event isn't emitted repeatedly
    if ($('#mes_stop').css('display') !== 'none') {
        $('#mes_stop').css({ 'display': 'none' });
        eventSource.emit(event_types.GENERATION_ENDED, chat.length);
    }
}
+
+class StreamingProcessor {
+ /**
+ * Creates a new streaming processor.
+ * @param {string} type Generation type
+ * @param {boolean} forceName2 If true, force the use of name2
+ * @param {Date} timeStarted Date when generation was started
+ * @param {string} continueMessage Previous message if the type is 'continue'
+ */
+ constructor(type, forceName2, timeStarted, continueMessage) {
+ this.result = '';
+ this.messageId = -1;
+ this.messageDom = null;
+ this.messageTextDom = null;
+ this.messageTimerDom = null;
+ this.messageTokenCounterDom = null;
+ /** @type {HTMLTextAreaElement} */
+ this.sendTextarea = document.querySelector('#send_textarea');
+ this.type = type;
+ this.force_name2 = forceName2;
+ this.isStopped = false;
+ this.isFinished = false;
+ this.generator = this.nullStreamingGeneration;
+ this.abortController = new AbortController();
+ this.firstMessageText = '...';
+ this.timeStarted = timeStarted;
+ this.continueMessage = type === 'continue' ? continueMessage : '';
+ this.swipes = [];
+ /** @type {import('./scripts/logprobs.js').TokenLogprobs[]} */
+ this.messageLogprobs = [];
+ this.toolCalls = [];
+ }
+
+ #checkDomElements(messageId) {
+ if (this.messageDom === null || this.messageTextDom === null) {
+ this.messageDom = document.querySelector(`#chat .mes[mesid="${messageId}"]`);
+ this.messageTextDom = this.messageDom?.querySelector('.mes_text');
+ this.messageTimerDom = this.messageDom?.querySelector('.mes_timer');
+ this.messageTokenCounterDom = this.messageDom?.querySelector('.tokenCounterDisplay');
+ }
+ }
+
+ #updateMessageBlockVisibility() {
+ if (this.messageDom instanceof HTMLElement && Array.isArray(this.toolCalls) && this.toolCalls.length > 0) {
+ const shouldHide = ['', '...'].includes(this.result);
+ this.messageDom.classList.toggle('displayNone', shouldHide);
+ }
+ }
+
+ showMessageButtons(messageId) {
+ if (messageId == -1) {
+ return;
+ }
+
+ showStopButton();
+ $(`#chat .mes[mesid="${messageId}"] .mes_buttons`).css({ 'display': 'none' });
+ }
+
+ hideMessageButtons(messageId) {
+ if (messageId == -1) {
+ return;
+ }
+
+ hideStopButton();
+ $(`#chat .mes[mesid="${messageId}"] .mes_buttons`).css({ 'display': 'flex' });
+ }
+
+ async onStartStreaming(text) {
+ let messageId = -1;
+
+ if (this.type == 'impersonate') {
+ this.sendTextarea.value = '';
+ this.sendTextarea.dispatchEvent(new Event('input', { bubbles: true }));
+ }
+ else {
+ await saveReply(this.type, text, true);
+ messageId = chat.length - 1;
+ this.#checkDomElements(messageId);
+ this.showMessageButtons(messageId);
+ }
+
+ hideSwipeButtons();
+ scrollChatToBottom();
+ return messageId;
+ }
+
    /**
     * Renders the accumulated streamed text into the target message (or into
     * the send textarea when impersonating) and updates timer/token displays.
     * @param {number} messageId Id of the message being streamed into
     * @param {string} text Raw accumulated generation text
     * @param {boolean} [isFinal] True on the last call, when the stream is complete
     */
    onProgressStreaming(messageId, text, isFinal) {
        const isImpersonate = this.type == 'impersonate';
        const isContinue = this.type == 'continue';

        // Clean up any extra swipes that arrived alongside the main stream
        if (!isImpersonate && !isContinue && Array.isArray(this.swipes) && this.swipes.length > 0) {
            for (let i = 0; i < this.swipes.length; i++) {
                this.swipes[i] = cleanUpMessage(this.swipes[i], false, false, true, this.stoppingStrings);
            }
        }

        let processedText = cleanUpMessage(text, isImpersonate, isContinue, !isFinal, this.stoppingStrings);

        // Predict unbalanced asterisks / quotes during streaming
        const charsToBalance = ['*', '"', '```'];
        for (const char of charsToBalance) {
            if (!isFinal && isOdd(countOccurrences(processedText, char))) {
                // Add character at the end to balance it
                const separator = char.length > 1 ? '\n' : '';
                processedText = processedText.trimEnd() + separator + char;
            }
        }

        if (isImpersonate) {
            this.sendTextarea.value = processedText;
            this.sendTextarea.dispatchEvent(new Event('input', { bubbles: true }));
        }
        else {
            this.#checkDomElements(messageId);
            this.#updateMessageBlockVisibility();
            const currentTime = new Date();
            // Don't waste time calculating token count for streaming
            const currentTokenCount = isFinal && power_user.message_token_count_enabled ? getTokenCount(processedText, 0) : 0;
            const timePassed = formatGenerationTimer(this.timeStarted, currentTime, currentTokenCount);
            chat[messageId]['mes'] = processedText;
            chat[messageId]['gen_started'] = this.timeStarted;
            chat[messageId]['gen_finished'] = currentTime;

            if (currentTokenCount) {
                if (!chat[messageId]['extra']) {
                    chat[messageId]['extra'] = {};
                }

                chat[messageId]['extra']['token_count'] = currentTokenCount;
                if (this.messageTokenCounterDom instanceof HTMLElement) {
                    this.messageTokenCounterDom.textContent = `${currentTokenCount}t`;
                }
            }

            // Keep the active swipe's stored copy of the text and metadata in sync
            if ((this.type == 'swipe' || this.type === 'continue') && Array.isArray(chat[messageId]['swipes'])) {
                chat[messageId]['swipes'][chat[messageId]['swipe_id']] = processedText;
                chat[messageId]['swipe_info'][chat[messageId]['swipe_id']] = { 'send_date': chat[messageId]['send_date'], 'gen_started': chat[messageId]['gen_started'], 'gen_finished': chat[messageId]['gen_finished'], 'extra': JSON.parse(JSON.stringify(chat[messageId]['extra'])) };
            }

            const formattedText = messageFormatting(
                processedText,
                chat[messageId].name,
                chat[messageId].is_system,
                chat[messageId].is_user,
                messageId,
            );
            if (this.messageTextDom instanceof HTMLElement) {
                this.messageTextDom.innerHTML = formattedText;
            }
            if (this.messageTimerDom instanceof HTMLElement) {
                this.messageTimerDom.textContent = timePassed.timerValue;
                this.messageTimerDom.title = timePassed.timerTitle;
            }
            this.setFirstSwipe(messageId);
        }

        // Respect the user scrolling up while the stream is running
        if (!scrollLock) {
            scrollChatToBottom();
        }
    }
+
    /**
     * Finalizes a streamed generation: renders the final text, attaches extra
     * swipes, emits message events, saves the chat, and optionally auto-swipes.
     * @param {number} messageId Id of the message that was streamed into
     * @param {string} text Final raw generation text
     */
    async onFinishStreaming(messageId, text) {
        this.hideMessageButtons(this.messageId);
        this.onProgressStreaming(messageId, text, true);
        addCopyToCodeBlocks($(`#chat .mes[mesid="${messageId}"]`));

        // Attach any extra swipes the backend produced alongside the main reply
        if (Array.isArray(this.swipes) && this.swipes.length > 0) {
            const message = chat[messageId];
            const swipeInfo = {
                send_date: message.send_date,
                gen_started: message.gen_started,
                gen_finished: message.gen_finished,
                extra: structuredClone(message.extra),
            };
            // NOTE(review): all extra swipes share the same swipeInfo object reference — confirm intended
            const swipeInfoArray = [];
            swipeInfoArray.length = this.swipes.length;
            swipeInfoArray.fill(swipeInfo);
            chat[messageId].swipes.push(...this.swipes);
            chat[messageId].swipe_info.push(...swipeInfoArray);
        }

        if (this.type !== 'impersonate') {
            await eventSource.emit(event_types.MESSAGE_RECEIVED, this.messageId);
            await eventSource.emit(event_types.CHARACTER_MESSAGE_RENDERED, this.messageId);
        } else {
            await eventSource.emit(event_types.IMPERSONATE_READY, text);
        }

        saveLogprobsForActiveMessage(this.messageLogprobs.filter(Boolean), this.continueMessage);
        await saveChatConditional();
        unblockGeneration();
        generatedPromptCache = '';

        //console.log("Generated text size:", text.length, text)

        // Auto-swipe: automatically re-roll when the reply is too short or contains blacklisted words
        const isAborted = this.abortController.signal.aborted;
        if (power_user.auto_swipe && !isAborted) {
            function containsBlacklistedWords(str, blacklist, threshold) {
                const regex = new RegExp(`\\b(${blacklist.join('|')})\\b`, 'gi');
                const matches = str.match(regex) || [];
                return matches.length >= threshold;
            }

            const generatedTextFiltered = (text) => {
                if (text) {
                    if (power_user.auto_swipe_minimum_length) {
                        if (text.length < power_user.auto_swipe_minimum_length && text.length !== 0) {
                            console.log('Generated text size too small');
                            return true;
                        }
                    }
                    if (power_user.auto_swipe_blacklist_threshold) {
                        if (containsBlacklistedWords(text, power_user.auto_swipe_blacklist, power_user.auto_swipe_blacklist_threshold)) {
                            console.log('Generated text has blacklisted words');
                            return true;
                        }
                    }
                }
                return false;
            };

            if (generatedTextFiltered(text)) {
                swipe_right();
                return;
            }
        }
        playMessageSound();
    }
+
+ onErrorStreaming() {
+ this.abortController.abort();
+ this.isStopped = true;
+
+ this.hideMessageButtons(this.messageId);
+ generatedPromptCache = '';
+ unblockGeneration();
+
+ const noEmitTypes = ['swipe', 'impersonate', 'continue'];
+ if (!noEmitTypes.includes(this.type)) {
+ eventSource.emit(event_types.MESSAGE_RECEIVED, this.messageId);
+ eventSource.emit(event_types.CHARACTER_MESSAGE_RENDERED, this.messageId);
+ }
+ }
+
+ setFirstSwipe(messageId) {
+ if (this.type !== 'swipe' && this.type !== 'impersonate') {
+ if (Array.isArray(chat[messageId]['swipes']) && chat[messageId]['swipes'].length === 1 && chat[messageId]['swipe_id'] === 0) {
+ chat[messageId]['swipes'][0] = chat[messageId]['mes'];
+ chat[messageId]['swipe_info'][0] = { 'send_date': chat[messageId]['send_date'], 'gen_started': chat[messageId]['gen_started'], 'gen_finished': chat[messageId]['gen_finished'], 'extra': JSON.parse(JSON.stringify(chat[messageId]['extra'])) };
+ }
+ }
+ }
+
    /**
     * Stops the streaming generation on request.
     * Aborting the controller interrupts the in-flight request; setting
     * isFinished marks the stop as deliberate so the error path in
     * generate()'s catch block is skipped.
     */
    onStopStreaming() {
        this.abortController.abort();
        this.isFinished = true;
    }
+
    /**
     * Placeholder generator used before a real streaming source is attached.
     * Always throws; callers must replace it with an actual generator function.
     * @returns {Generator<{ text: string, swipes: string[], logprobs: import('./scripts/logprobs.js').TokenLogprobs, toolCalls: any[] }, void, void>}
     * @throws {Error} Always — streaming was started without a hooked-up generator
     */
    *nullStreamingGeneration() {
        throw new Error('Generation function for streaming is not hooked up');
    }
+
    /**
     * Drives the streaming generation loop: creates the placeholder message on
     * first call, consumes chunks from `this.generator()`, throttles UI updates
     * to the configured FPS, and records swipes/logprobs/tool calls as they arrive.
     * @returns {Promise<string>} The accumulated generated text (may be partial on stop/error)
     */
    async generate() {
        if (this.messageId == -1) {
            this.messageId = await this.onStartStreaming(this.firstMessageText);
            await delay(1); // delay for message to be rendered
            scrollLock = false;
        }

        // Stopping strings are expensive to calculate, especially with macros enabled. To remove stopping strings
        // when streaming, we cache the result of getStoppingStrings instead of calling it once per token.
        const isImpersonate = this.type == 'impersonate';
        const isContinue = this.type == 'continue';
        this.stoppingStrings = getStoppingStrings(isImpersonate, isContinue);

        try {
            const sw = new Stopwatch(1000 / power_user.streaming_fps);
            const timestamps = [];
            for await (const { text, swipes, logprobs, toolCalls } of this.generator()) {
                timestamps.push(Date.now());
                if (this.isStopped) {
                    return;
                }

                this.toolCalls = toolCalls;
                this.result = text;
                this.swipes = Array.from(swipes ?? []);
                if (logprobs) {
                    this.messageLogprobs.push(...(Array.isArray(logprobs) ? logprobs : [logprobs]));
                }
                await eventSource.emit(event_types.STREAM_TOKEN_RECEIVED, text);
                // Throttle DOM updates to power_user.streaming_fps via the stopwatch
                await sw.tick(() => this.onProgressStreaming(this.messageId, this.continueMessage + text));

                //
                // DAVE MOD
                // Mode #2, Set global for incoming text ONLY.
                //
                // NOTE(review): assigns to `dave_holdtext` without a declaration in
                // this block — presumably declared elsewhere in the file; otherwise
                // this creates an implicit global. Verify the declaration exists.
                dave_holdtext=text;

                //
                // END DAVE MOD
                //


            }
            // NOTE(review): if the generator yields nothing, `timestamps` is empty and
            // `seconds` is NaN — the stats line then prints NaN but nothing breaks.
            const seconds = (timestamps[timestamps.length - 1] - timestamps[0]) / 1000;
            console.warn(`Stream stats: ${timestamps.length} tokens, ${seconds.toFixed(2)} seconds, rate: ${Number(timestamps.length / seconds).toFixed(2)} TPS`);
        }
        catch (err) {
            // in the case of a self-inflicted abort, we have already cleaned up
            if (!this.isFinished) {
                console.error(err);
                this.onErrorStreaming();
            }
            return this.result;
        }

        this.isFinished = true;
        return this.result;
    }
+}
+
+/**
+ * Generates a message using the provided prompt.
+ * @param {string} prompt Prompt to generate a message from
+ * @param {string} api API to use. Main API is used if not specified.
+ * @param {boolean} instructOverride true to override instruct mode, false to use the default value
+ * @param {boolean} quietToLoud true to generate a message in system mode, false to generate a message in character mode
+ * @param {string} [systemPrompt] System prompt to use. Only Instruct mode or OpenAI.
+ * @param {number} [responseLength] Maximum response length. If unset, the global default value is used.
+ * @returns {Promise} Generated message
+ */
+export async function generateRaw(prompt, api, instructOverride, quietToLoud, systemPrompt, responseLength) {
+ if (!api) {
+ api = main_api;
+ }
+
+ const abortController = new AbortController();
+ const responseLengthCustomized = typeof responseLength === 'number' && responseLength > 0;
+ const isInstruct = power_user.instruct.enabled && api !== 'openai' && api !== 'novel' && !instructOverride;
+ const isQuiet = true;
+ let eventHook = () => { };
+
+ if (systemPrompt) {
+ systemPrompt = substituteParams(systemPrompt);
+ systemPrompt = isInstruct ? formatInstructModeSystemPrompt(systemPrompt) : systemPrompt;
+ prompt = api === 'openai' ? prompt : `${systemPrompt}\n${prompt}`;
+ }
+
+ prompt = substituteParams(prompt);
+ prompt = api == 'novel' ? adjustNovelInstructionPrompt(prompt) : prompt;
+ prompt = isInstruct ? formatInstructModeChat(name1, prompt, false, true, '', name1, name2, false) : prompt;
+ prompt = isInstruct ? (prompt + formatInstructModePrompt(name2, false, '', name1, name2, isQuiet, quietToLoud)) : (prompt + '\n');
+
+ try {
+ if (responseLengthCustomized) {
+ TempResponseLength.save(api, responseLength);
+ }
+ let generateData = {};
+
+ switch (api) {
+ case 'kobold':
+ case 'koboldhorde':
+ if (preset_settings === 'gui') {
+ generateData = { prompt: prompt, gui_settings: true, max_length: amount_gen, max_context_length: max_context, api_server };
+ } else {
+ const isHorde = api === 'koboldhorde';
+ const koboldSettings = koboldai_settings[koboldai_setting_names[preset_settings]];
+ generateData = getKoboldGenerationData(prompt, koboldSettings, amount_gen, max_context, isHorde, 'quiet');
+ }
+ TempResponseLength.restore(api);
+ break;
+ case 'novel': {
+ const novelSettings = novelai_settings[novelai_setting_names[nai_settings.preset_settings_novel]];
+ generateData = getNovelGenerationData(prompt, novelSettings, amount_gen, false, false, null, 'quiet');
+ TempResponseLength.restore(api);
+ break;
+ }
+ case 'textgenerationwebui':
+ generateData = getTextGenGenerationData(prompt, amount_gen, false, false, null, 'quiet');
+ TempResponseLength.restore(api);
+ break;
+ case 'openai': {
+ generateData = [{ role: 'user', content: prompt.trim() }];
+ if (systemPrompt) {
+ generateData.unshift({ role: 'system', content: systemPrompt.trim() });
+ }
+ eventHook = TempResponseLength.setupEventHook(api);
+ } break;
+ }
+
+ let data = {};
+
+ if (api === 'koboldhorde') {
+ data = await generateHorde(prompt, generateData, abortController.signal, false);
+ } else if (api === 'openai') {
+ data = await sendOpenAIRequest('quiet', generateData, abortController.signal);
+ } else {
+ const generateUrl = getGenerateUrl(api);
+ const response = await fetch(generateUrl, {
+ method: 'POST',
+ headers: getRequestHeaders(),
+ cache: 'no-cache',
+ body: JSON.stringify(generateData),
+ signal: abortController.signal,
+ });
+
+ if (!response.ok) {
+ throw await response.json();
+ }
+
+ data = await response.json();
+ }
+
+ // should only happen for text completions
+ // other frontend paths do not return data if calling the backend fails,
+ // they throw things instead
+ if (data.error) {
+ throw new Error(data.response);
+ }
+
+ const message = cleanUpMessage(extractMessageFromData(data), false, false, true);
+
+ if (!message) {
+ throw new Error('No message generated');
+ }
+
+ return message;
+ } finally {
+ if (responseLengthCustomized && TempResponseLength.isCustomized()) {
+ TempResponseLength.restore(api);
+ TempResponseLength.removeEventHook(api, eventHook);
+ }
+ }
+}
+
/**
 * Helper for temporarily overriding the maximum response length for a single
 * generation and restoring the previous value afterwards.
 */
class TempResponseLength {
    static #originalResponseLength = -1;
    static #lastApi = null;

    /**
     * Whether a temporary response length override is currently active.
     * @returns {boolean}
     */
    static isCustomized() {
        return this.#originalResponseLength > -1;
    }

    /**
     * Save the current response length for the specified API.
     * @param {string} api API identifier
     * @param {number} responseLength New response length
     */
    static save(api, responseLength) {
        if (api === 'openai') {
            this.#originalResponseLength = oai_settings.openai_max_tokens;
            oai_settings.openai_max_tokens = responseLength;
        } else {
            this.#originalResponseLength = amount_gen;
            amount_gen = responseLength;
        }

        this.#lastApi = api;
        console.log('[TempResponseLength] Saved original response length:', this.#originalResponseLength);
    }

    /**
     * Restore the original response length for the specified API.
     * @param {string|null} api API identifier
     * @returns {void}
     */
    static restore(api) {
        if (this.#originalResponseLength === -1) {
            return;
        }

        // Fall back to the API the override was saved for when none is given.
        let targetApi = api;
        if (!targetApi && this.#lastApi) {
            targetApi = this.#lastApi;
        }

        if (targetApi === 'openai') {
            oai_settings.openai_max_tokens = this.#originalResponseLength;
        } else {
            amount_gen = this.#originalResponseLength;
        }

        console.log('[TempResponseLength] Restored original response length:', this.#originalResponseLength);
        this.#originalResponseLength = -1;
        this.#lastApi = null;
    }

    /**
     * Sets up an event hook to restore the original response length when the event is emitted.
     * @param {string} api API identifier
     * @returns {function(): void} Event hook function
     */
    static setupEventHook(api) {
        const eventHook = () => {
            if (this.isCustomized()) {
                this.restore(api);
            }
        };

        const eventType = api === 'openai'
            ? event_types.CHAT_COMPLETION_SETTINGS_READY
            : event_types.GENERATE_AFTER_DATA;
        eventSource.once(eventType, eventHook);

        return eventHook;
    }

    /**
     * Removes the event hook for the specified API.
     * @param {string} api API identifier
     * @param {function(): void} eventHook Previously set up event hook
     */
    static removeEventHook(api, eventHook) {
        const eventType = api === 'openai'
            ? event_types.CHAT_COMPLETION_SETTINGS_READY
            : event_types.GENERATE_AFTER_DATA;
        eventSource.removeListener(eventType, eventHook);
    }
}
+
+/**
+ * Removes last message from the chat DOM.
+ * @returns {Promise} Resolves when the message is removed.
+ */
+function removeLastMessage() {
+ return new Promise((resolve) => {
+ const lastMes = $('#chat').children('.mes').last();
+ if (lastMes.length === 0) {
+ return resolve();
+ }
+ lastMes.hide(animation_duration, function () {
+ $(this).remove();
+ resolve();
+ });
+ });
+}
+
+/**
+ * Runs a generation using the current chat context.
+ * @param {string} type Generation type
+ * @param {GenerateOptions} options Generation options
+ * @param {boolean} dryRun Whether to actually generate a message or just assemble the prompt
+ * @returns {Promise} Returns a promise that resolves when the text is done generating.
+ * @typedef {{automatic_trigger?: boolean, force_name2?: boolean, quiet_prompt?: string, quietToLoud?: boolean, skipWIAN?: boolean, force_chid?: number, signal?: AbortSignal, quietImage?: string, quietName?: string, depth?: number }} GenerateOptions
+ */
+export async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, quietToLoud, skipWIAN, force_chid, signal, quietImage, quietName, depth = 0 } = {}, dryRun = false) {
+ console.log('Generate entered');
+ setGenerationProgress(0);
+ generation_started = new Date();
+
+ // Occurs every time, even if the generation is aborted due to slash commands execution
+ await eventSource.emit(event_types.GENERATION_STARTED, type, { automatic_trigger, force_name2, quiet_prompt, quietToLoud, skipWIAN, force_chid, signal, quietImage }, dryRun);
+
+ // Don't recreate abort controller if signal is passed
+ if (!(abortController && signal)) {
+ abortController = new AbortController();
+ }
+
+ // OpenAI doesn't need instruct mode. Use OAI main prompt instead.
+ const isInstruct = power_user.instruct.enabled && main_api !== 'openai';
+ const isImpersonate = type == 'impersonate';
+
+ if (!(dryRun || type == 'regenerate' || type == 'swipe' || type == 'quiet')) {
+ const interruptedByCommand = await processCommands(String($('#send_textarea').val()));
+
+ if (interruptedByCommand) {
+ //$("#send_textarea").val('')[0].dispatchEvent(new Event('input', { bubbles:true }));
+ unblockGeneration(type);
+ return Promise.resolve();
+ }
+ }
+
+ // Occurs only if the generation is not aborted due to slash commands execution
+ await eventSource.emit(event_types.GENERATION_AFTER_COMMANDS, type, { automatic_trigger, force_name2, quiet_prompt, quietToLoud, skipWIAN, force_chid, signal, quietImage }, dryRun);
+
+ if (main_api == 'kobold' && kai_settings.streaming_kobold && !kai_flags.can_use_streaming) {
+ toastr.error(t`Streaming is enabled, but the version of Kobold used does not support token streaming.`, undefined, { timeOut: 10000, preventDuplicates: true });
+ unblockGeneration(type);
+ return Promise.resolve();
+ }
+
+ if (isHordeGenerationNotAllowed()) {
+ unblockGeneration(type);
+ return Promise.resolve();
+ }
+
+ if (!dryRun) {
+ // Ping server to make sure it is still alive
+ const pingResult = await pingServer();
+
+ if (!pingResult) {
+ unblockGeneration(type);
+ toastr.error(t`Verify that the server is running and accessible.`, t`ST Server cannot be reached`);
+ throw new Error('Server unreachable');
+ }
+
+ // Hide swipes if not in a dry run.
+ hideSwipeButtons();
+ // If generated any message, set the flag to indicate it can't be recreated again.
+ chat_metadata['tainted'] = true;
+ }
+
+ if (selected_group && !is_group_generating) {
+ if (!dryRun) {
+ // Returns the promise that generateGroupWrapper returns; resolves when generation is done
+ return generateGroupWrapper(false, type, { quiet_prompt, force_chid, signal: abortController.signal, quietImage });
+ }
+
+ const characterIndexMap = new Map(characters.map((char, index) => [char.avatar, index]));
+ const group = groups.find((x) => x.id === selected_group);
+
+ const enabledMembers = group.members.reduce((acc, member) => {
+ if (!group.disabled_members.includes(member) && !acc.includes(member)) {
+ acc.push(member);
+ }
+ return acc;
+ }, []);
+
+ const memberIds = enabledMembers
+ .map((member) => characterIndexMap.get(member))
+ .filter((index) => index !== undefined && index !== null);
+
+ if (memberIds.length > 0) {
+ if (menu_type != 'character_edit') setCharacterId(memberIds[0]);
+ setCharacterName('');
+ } else {
+ console.log('No enabled members found');
+ unblockGeneration(type);
+ return Promise.resolve();
+ }
+ }
+
+ //#########QUIET PROMPT STUFF##############
+ //this function just gives special care to novel quiet instruction prompts
+ if (quiet_prompt) {
+ quiet_prompt = substituteParams(quiet_prompt);
+ quiet_prompt = main_api == 'novel' && !quietToLoud ? adjustNovelInstructionPrompt(quiet_prompt) : quiet_prompt;
+ }
+
+ const hasBackendConnection = online_status !== 'no_connection';
+
+ // We can't do anything because we're not in a chat right now. (Unless it's a dry run, in which case we need to
+ // assemble the prompt so we can count its tokens regardless of whether a chat is active.)
+ if (!dryRun && !hasBackendConnection) {
+ is_send_press = false;
+ return Promise.resolve();
+ }
+
+ let textareaText;
+ if (type !== 'regenerate' && type !== 'swipe' && type !== 'quiet' && !isImpersonate && !dryRun) {
+ is_send_press = true;
+ textareaText = String($('#send_textarea').val());
+ $('#send_textarea').val('')[0].dispatchEvent(new Event('input', { bubbles: true }));
+ } else {
+ textareaText = '';
+ if (chat.length && chat[chat.length - 1]['is_user']) {
+ //do nothing? why does this check exist?
+ }
+ else if (type !== 'quiet' && type !== 'swipe' && !isImpersonate && !dryRun && chat.length) {
+ chat.length = chat.length - 1;
+ await removeLastMessage();
+ await eventSource.emit(event_types.MESSAGE_DELETED, chat.length);
+ }
+ }
+
+ const isContinue = type == 'continue';
+
+ // Rewrite the generation timer to account for the time passed for all the continuations.
+ if (isContinue && chat.length) {
+ const prevFinished = chat[chat.length - 1]['gen_finished'];
+ const prevStarted = chat[chat.length - 1]['gen_started'];
+
+ if (prevFinished && prevStarted) {
+ const timePassed = prevFinished - prevStarted;
+ generation_started = new Date(Date.now() - timePassed);
+ chat[chat.length - 1]['gen_started'] = generation_started;
+ }
+ }
+
+ if (!dryRun) {
+ deactivateSendButtons();
+ }
+
+ let { messageBias, promptBias, isUserPromptBias } = getBiasStrings(textareaText, type);
+
+ //*********************************
    //PRE-FORMATTING STRING
+ //*********************************
+
+ // These generation types should not attach pending files to the chat
+ const noAttachTypes = [
+ 'regenerate',
+ 'swipe',
+ 'impersonate',
+ 'quiet',
+ 'continue',
+ 'ask_command',
+ ];
+ //for normal messages sent from user..
+ if ((textareaText != '' || (hasPendingFileAttachment() && !noAttachTypes.includes(type))) && !automatic_trigger && type !== 'quiet' && !dryRun) {
+ // If user message contains no text other than bias - send as a system message
+ if (messageBias && !removeMacros(textareaText)) {
+ sendSystemMessage(system_message_types.GENERIC, ' ', { bias: messageBias });
+ }
+ else {
+ await sendMessageAsUser(textareaText, messageBias);
+ }
+ }
+ else if (textareaText == '' && !automatic_trigger && !dryRun && type === undefined && main_api == 'openai' && oai_settings.send_if_empty.trim().length > 0) {
+ // Use send_if_empty if set and the user message is empty. Only when sending messages normally
+ await sendMessageAsUser(oai_settings.send_if_empty.trim(), messageBias);
+ }
+
+ let {
+ description,
+ personality,
+ persona,
+ scenario,
+ mesExamples,
+ system,
+ jailbreak,
+ } = getCharacterCardFields();
+
+ if (main_api !== 'openai') {
+ if (power_user.sysprompt.enabled) {
+ system = power_user.prefer_character_prompt && system ? system : baseChatReplace(power_user.sysprompt.content, name1, name2);
+ system = isInstruct ? formatInstructModeSystemPrompt(substituteParams(system, name1, name2, power_user.sysprompt.content)) : system;
+ } else {
+ // Nullify if it's not enabled
+ system = '';
+ }
+ }
+
+ // Depth prompt (character-specific A/N)
+ removeDepthPrompts();
+ const groupDepthPrompts = getGroupDepthPrompts(selected_group, Number(this_chid));
+
+ if (selected_group && Array.isArray(groupDepthPrompts) && groupDepthPrompts.length > 0) {
+ groupDepthPrompts.forEach((value, index) => {
+ const role = getExtensionPromptRoleByName(value.role);
+ setExtensionPrompt('DEPTH_PROMPT_' + index, value.text, extension_prompt_types.IN_CHAT, value.depth, extension_settings.note.allowWIScan, role);
+ });
+ } else {
+ const depthPromptText = baseChatReplace(characters[this_chid]?.data?.extensions?.depth_prompt?.prompt?.trim(), name1, name2) || '';
+ const depthPromptDepth = characters[this_chid]?.data?.extensions?.depth_prompt?.depth ?? depth_prompt_depth_default;
+ const depthPromptRole = getExtensionPromptRoleByName(characters[this_chid]?.data?.extensions?.depth_prompt?.role ?? depth_prompt_role_default);
+ setExtensionPrompt('DEPTH_PROMPT', depthPromptText, extension_prompt_types.IN_CHAT, depthPromptDepth, extension_settings.note.allowWIScan, depthPromptRole);
+ }
+
+ // First message in fresh 1-on-1 chat reacts to user/character settings changes
+ if (chat.length) {
+ chat[0].mes = substituteParams(chat[0].mes);
+ }
+
+ // Collect messages with usable content
+ const canUseTools = ToolManager.isToolCallingSupported();
+ const canPerformToolCalls = !dryRun && ToolManager.canPerformToolCalls(type) && depth < ToolManager.RECURSE_LIMIT;
+ let coreChat = chat.filter(x => !x.is_system || (canUseTools && Array.isArray(x.extra?.tool_invocations)));
+ if (type === 'swipe') {
+ coreChat.pop();
+ }
+
+ coreChat = await Promise.all(coreChat.map(async (chatItem, index) => {
+ let message = chatItem.mes;
+ let regexType = chatItem.is_user ? regex_placement.USER_INPUT : regex_placement.AI_OUTPUT;
+ let options = { isPrompt: true, depth: (coreChat.length - index - 1) };
+
+ let regexedMessage = getRegexedString(message, regexType, options);
+ regexedMessage = await appendFileContent(chatItem, regexedMessage);
+
+ if (chatItem?.extra?.append_title && chatItem?.extra?.title) {
+ regexedMessage = `${regexedMessage}\n\n${chatItem.extra.title}`;
+ }
+
+ return {
+ ...chatItem,
+ mes: regexedMessage,
+ index,
+ };
+ }));
+
+ // Determine token limit
+ let this_max_context = getMaxContextSize();
+
+ if (!dryRun) {
+ console.debug('Running extension interceptors');
+ const aborted = await runGenerationInterceptors(coreChat, this_max_context, type);
+
+ if (aborted) {
+ console.debug('Generation aborted by extension interceptors');
+ unblockGeneration(type);
+ return Promise.resolve();
+ }
+ } else {
+ console.debug('Skipping extension interceptors for dry run');
+ }
+
+ // Adjust token limit for Horde
+ let adjustedParams;
+ if (main_api == 'koboldhorde' && (horde_settings.auto_adjust_context_length || horde_settings.auto_adjust_response_length)) {
+ try {
+ adjustedParams = await adjustHordeGenerationParams(max_context, amount_gen);
+ }
+ catch {
+ unblockGeneration(type);
+ return Promise.resolve();
+ }
+ if (horde_settings.auto_adjust_context_length) {
+ this_max_context = (adjustedParams.maxContextLength - adjustedParams.maxLength);
+ }
+ }
+
+ // Fetches the combined prompt for both negative and positive prompts
+ const cfgGuidanceScale = getGuidanceScale();
+ const useCfgPrompt = cfgGuidanceScale && cfgGuidanceScale.value !== 1;
+
+ // Adjust max context based on CFG prompt to prevent overfitting
+ if (useCfgPrompt) {
+ const negativePrompt = getCfgPrompt(cfgGuidanceScale, true, true)?.value || '';
+ const positivePrompt = getCfgPrompt(cfgGuidanceScale, false, true)?.value || '';
+ if (negativePrompt || positivePrompt) {
+ const previousMaxContext = this_max_context;
+ const [negativePromptTokenCount, positivePromptTokenCount] = await Promise.all([getTokenCountAsync(negativePrompt), getTokenCountAsync(positivePrompt)]);
+ const decrement = Math.max(negativePromptTokenCount, positivePromptTokenCount);
+ this_max_context -= decrement;
+ console.log(`Max context reduced by ${decrement} tokens of CFG prompt (${previousMaxContext} -> ${this_max_context})`);
+ }
+ }
+
+ console.log(`Core/all messages: ${coreChat.length}/${chat.length}`);
+
+ // kingbri MARK: - Make sure the prompt bias isn't the same as the user bias
+ if ((promptBias && !isUserPromptBias) || power_user.always_force_name2 || main_api == 'novel') {
+ force_name2 = true;
+ }
+
+ if (isImpersonate) {
+ force_name2 = false;
+ }
+
+ // TODO (kingbri): Migrate to a utility function
+ /**
+ * Parses an examples string.
+ * @param {string} examplesStr
+ * @returns {string[]} Examples array with block heading
+ */
+ function parseMesExamples(examplesStr) {
+ if (!examplesStr || examplesStr.length === 0 || examplesStr === '') {
+ return [];
+ }
+
+ if (!examplesStr.startsWith('')) {
+ examplesStr = '\n' + examplesStr.trim();
+ }
+
+ const exampleSeparator = power_user.context.example_separator ? `${substituteParams(power_user.context.example_separator)}\n` : '';
+ const blockHeading = main_api === 'openai' ? '\n' : (exampleSeparator || (isInstruct ? '\n' : ''));
+ const splitExamples = examplesStr.split(//gi).slice(1).map(block => `${blockHeading}${block.trim()}\n`);
+
+ return splitExamples;
+ }
+
+ let mesExamplesArray = parseMesExamples(mesExamples);
+
+ //////////////////////////////////
+ // Extension added strings
+ // Set non-WI AN
+ setFloatingPrompt();
+ // Add persona description to prompt
+ addPersonaDescriptionExtensionPrompt();
+
+ // Add WI to prompt (and also inject WI to AN value via hijack)
+ // Make quiet prompt available for WIAN
+ setExtensionPrompt('QUIET_PROMPT', quiet_prompt || '', extension_prompt_types.IN_PROMPT, 0, true);
+ const chatForWI = coreChat.map(x => world_info_include_names ? `${x.name}: ${x.mes}` : x.mes).reverse();
+ const { worldInfoString, worldInfoBefore, worldInfoAfter, worldInfoExamples, worldInfoDepth } = await getWorldInfoPrompt(chatForWI, this_max_context, dryRun);
+ setExtensionPrompt('QUIET_PROMPT', '', extension_prompt_types.IN_PROMPT, 0, true);
+
+ // Add message example WI
+ for (const example of worldInfoExamples) {
+ const exampleMessage = example.content;
+
+ if (exampleMessage.length === 0) {
+ continue;
+ }
+
+ const formattedExample = baseChatReplace(exampleMessage, name1, name2);
+ const cleanedExample = parseMesExamples(formattedExample);
+
+ // Insert depending on before or after position
+ if (example.position === wi_anchor_position.before) {
+ mesExamplesArray.unshift(...cleanedExample);
+ } else {
+ mesExamplesArray.push(...cleanedExample);
+ }
+ }
+
+ // At this point, the raw message examples can be created
+ const mesExamplesRawArray = [...mesExamplesArray];
+
+ if (mesExamplesArray && isInstruct) {
+ mesExamplesArray = formatInstructModeExamples(mesExamplesArray, name1, name2);
+ }
+
+ if (skipWIAN !== true) {
+ console.log('skipWIAN not active, adding WIAN');
+ // Add all depth WI entries to prompt
+ flushWIDepthInjections();
+ if (Array.isArray(worldInfoDepth)) {
+ worldInfoDepth.forEach((e) => {
+ const joinedEntries = e.entries.join('\n');
+ setExtensionPrompt(`customDepthWI-${e.depth}-${e.role}`, joinedEntries, extension_prompt_types.IN_CHAT, e.depth, false, e.role);
+ });
+ }
+ } else {
+ console.log('skipping WIAN');
+ }
+
+ // Inject all Depth prompts. Chat Completion does it separately
+ let injectedIndices = [];
+ if (main_api !== 'openai') {
+ injectedIndices = await doChatInject(coreChat, isContinue);
+ }
+
+ // Insert character jailbreak as the last user message (if exists, allowed, preferred, and not using Chat Completion)
+ if (power_user.context.allow_jailbreak && power_user.prefer_character_jailbreak && main_api !== 'openai' && jailbreak) {
        // Set "original" explicitly to empty string since there's no original
+ jailbreak = substituteParams(jailbreak, name1, name2, '');
+
+ // When continuing generation of previous output, last user message precedes the message to continue
+ if (isContinue) {
+ coreChat.splice(coreChat.length - 1, 0, { mes: jailbreak, is_user: true });
+ }
+ else {
+ coreChat.push({ mes: jailbreak, is_user: true });
+ }
+ }
+
+ let chat2 = [];
+ let continue_mag = '';
+ const userMessageIndices = [];
+ const lastUserMessageIndex = coreChat.findLastIndex(x => x.is_user);
+
+ for (let i = coreChat.length - 1, j = 0; i >= 0; i--, j++) {
+ if (main_api == 'openai') {
+ chat2[i] = coreChat[j].mes;
+ if (i === 0 && isContinue) {
+ chat2[i] = chat2[i].slice(0, chat2[i].lastIndexOf(coreChat[j].mes) + coreChat[j].mes.length);
+ continue_mag = coreChat[j].mes;
+ }
+ continue;
+ }
+
+ chat2[i] = formatMessageHistoryItem(coreChat[j], isInstruct, false);
+
+ if (j === 0 && isInstruct) {
+ // Reformat with the first output sequence (if any)
+ chat2[i] = formatMessageHistoryItem(coreChat[j], isInstruct, force_output_sequence.FIRST);
+ }
+
+ if (lastUserMessageIndex >= 0 && j === lastUserMessageIndex && isInstruct) {
+ // Reformat with the last input sequence (if any)
+ chat2[i] = formatMessageHistoryItem(coreChat[j], isInstruct, force_output_sequence.LAST);
+ }
+
+ // Do not suffix the message for continuation
+ if (i === 0 && isContinue) {
+ if (isInstruct) {
+ // Reformat with the last output sequence (if any)
+ chat2[i] = formatMessageHistoryItem(coreChat[j], isInstruct, force_output_sequence.LAST);
+ }
+
+ chat2[i] = chat2[i].slice(0, chat2[i].lastIndexOf(coreChat[j].mes) + coreChat[j].mes.length);
+ continue_mag = coreChat[j].mes;
+ }
+
+ if (coreChat[j].is_user) {
+ userMessageIndices.push(i);
+ }
+ }
+
+ let addUserAlignment = isInstruct && power_user.instruct.user_alignment_message;
+ let userAlignmentMessage = '';
+
+ if (addUserAlignment) {
+ const alignmentMessage = {
+ name: name1,
+ mes: substituteParams(power_user.instruct.user_alignment_message),
+ is_user: true,
+ };
+ userAlignmentMessage = formatMessageHistoryItem(alignmentMessage, isInstruct, force_output_sequence.FIRST);
+ }
+
+ // Call combined AN into Generate
+ const beforeScenarioAnchor = (await getExtensionPrompt(extension_prompt_types.BEFORE_PROMPT)).trimStart();
+ const afterScenarioAnchor = await getExtensionPrompt(extension_prompt_types.IN_PROMPT);
+
+ const storyStringParams = {
+ description: description,
+ personality: personality,
+ persona: power_user.persona_description_position == persona_description_positions.IN_PROMPT ? persona : '',
+ scenario: scenario,
+ system: system,
+ char: name2,
+ user: name1,
+ wiBefore: worldInfoBefore,
+ wiAfter: worldInfoAfter,
+ loreBefore: worldInfoBefore,
+ loreAfter: worldInfoAfter,
+ mesExamples: mesExamplesArray.join(''),
+ mesExamplesRaw: mesExamplesRawArray.join(''),
+ };
+
+ const storyString = renderStoryString(storyStringParams);
+
+ // Story string rendered, safe to remove
+ if (power_user.strip_examples) {
+ mesExamplesArray = [];
+ }
+
+ let oaiMessages = [];
+ let oaiMessageExamples = [];
+
+ if (main_api === 'openai') {
+ oaiMessages = setOpenAIMessages(coreChat);
+ oaiMessageExamples = setOpenAIMessageExamples(mesExamplesArray);
+ }
+
+ // hack for regeneration of the first message
+ if (chat2.length == 0) {
+ chat2.push('');
+ }
+
+ let examplesString = '';
+ let chatString = addChatsPreamble(addChatsSeparator(''));
+ let cyclePrompt = '';
+
+ async function getMessagesTokenCount() {
+ const encodeString = [
+ beforeScenarioAnchor,
+ storyString,
+ afterScenarioAnchor,
+ examplesString,
+ userAlignmentMessage,
+ chatString,
+ modifyLastPromptLine(''),
+ cyclePrompt,
+ ].join('').replace(/\r/gm, '');
+ return getTokenCountAsync(encodeString, power_user.token_padding);
+ }
+
+ // Force pinned examples into the context
+ let pinExmString;
+ if (power_user.pin_examples) {
+ pinExmString = examplesString = mesExamplesArray.join('');
+ }
+
+ // Only add the chat in context if past the greeting message
+ if (isContinue && (chat2.length > 1 || main_api === 'openai')) {
+ cyclePrompt = chat2.shift();
+ }
+
+ // Collect enough messages to fill the context
+ let arrMes = new Array(chat2.length);
+ let tokenCount = await getMessagesTokenCount();
+ let lastAddedIndex = -1;
+
+ // Pre-allocate all injections first.
+ // If it doesn't fit - user shot himself in the foot
+ for (const index of injectedIndices) {
+ const item = chat2[index];
+
+ if (typeof item !== 'string') {
+ continue;
+ }
+
+ tokenCount += await getTokenCountAsync(item.replace(/\r/gm, ''));
+ if (tokenCount < this_max_context) {
+ chatString = chatString + item;
+ arrMes[index] = item;
+ lastAddedIndex = Math.max(lastAddedIndex, index);
+ } else {
+ break;
+ }
+ }
+
+ for (let i = 0; i < chat2.length; i++) {
+ // not needed for OAI prompting
+ if (main_api == 'openai') {
+ break;
+ }
+
+ // Skip already injected messages
+ if (arrMes[i] !== undefined) {
+ continue;
+ }
+
+ const item = chat2[i];
+
+ if (typeof item !== 'string') {
+ continue;
+ }
+
+ tokenCount += await getTokenCountAsync(item.replace(/\r/gm, ''));
+ if (tokenCount < this_max_context) {
+ chatString = chatString + item;
+ arrMes[i] = item;
+ lastAddedIndex = Math.max(lastAddedIndex, i);
+ } else {
+ break;
+ }
+ }
+
+ // Add user alignment message if last message is not a user message
+ const stoppedAtUser = userMessageIndices.includes(lastAddedIndex);
+ if (addUserAlignment && !stoppedAtUser) {
+ tokenCount += await getTokenCountAsync(userAlignmentMessage.replace(/\r/gm, ''));
+ chatString = userAlignmentMessage + chatString;
+ arrMes.push(userAlignmentMessage);
+ injectedIndices.push(arrMes.length - 1);
+ }
+
+ // Unsparse the array. Adjust injected indices
+ const newArrMes = [];
+ const newInjectedIndices = [];
+ for (let i = 0; i < arrMes.length; i++) {
+ if (arrMes[i] !== undefined) {
+ newArrMes.push(arrMes[i]);
+ if (injectedIndices.includes(i)) {
+ newInjectedIndices.push(newArrMes.length - 1);
+ }
+ }
+ }
+
+ arrMes = newArrMes;
+ injectedIndices = newInjectedIndices;
+
+ if (main_api !== 'openai') {
+ setInContextMessages(arrMes.length - injectedIndices.length, type);
+ }
+
+ // Estimate how many unpinned example messages fit in the context
+ tokenCount = await getMessagesTokenCount();
+ let count_exm_add = 0;
+ if (!power_user.pin_examples) {
+ for (let example of mesExamplesArray) {
+ tokenCount += await getTokenCountAsync(example.replace(/\r/gm, ''));
+ examplesString += example;
+ if (tokenCount < this_max_context) {
+ count_exm_add++;
+ } else {
+ break;
+ }
+ }
+ }
+
+ let mesSend = [];
+ console.debug('calling runGenerate');
+
+ if (isContinue) {
+ // Coping mechanism for OAI spacing
+ if (main_api === 'openai' && !cyclePrompt.endsWith(' ')) {
+ cyclePrompt += oai_settings.continue_postfix;
+ continue_mag += oai_settings.continue_postfix;
+ }
+ }
+
+ const originalType = type;
+
+ if (!dryRun) {
+ is_send_press = true;
+ }
+
+ generatedPromptCache += cyclePrompt;
+ if (generatedPromptCache.length == 0 || type === 'continue') {
+ console.debug('generating prompt');
+ chatString = '';
+ arrMes = arrMes.reverse();
+ arrMes.forEach(function (item, i, arr) {
+ // OAI doesn't need all of this
+ if (main_api === 'openai') {
+ return;
+ }
+
+ // Cohee: This removes a newline from the end of the last message in the context
+ // Last prompt line will add a newline if it's not a continuation
+ // In instruct mode it only removes it if wrap is enabled and it's not a quiet generation
+ if (i === arrMes.length - 1 && type !== 'continue') {
+ if (!isInstruct || (power_user.instruct.wrap && type !== 'quiet')) {
+ item = item.replace(/\n?$/, '');
+ }
+ }
+
+ mesSend[mesSend.length] = { message: item, extensionPrompts: [] };
+ });
+ }
+
+ let mesExmString = '';
+
+ function setPromptString() {
+ if (main_api == 'openai') {
+ return;
+ }
+
+ console.debug('--setting Prompt string');
+ mesExmString = pinExmString ?? mesExamplesArray.slice(0, count_exm_add).join('');
+
+ if (mesSend.length) {
+ mesSend[mesSend.length - 1].message = modifyLastPromptLine(mesSend[mesSend.length - 1].message);
+ }
+ }
+
    /**
     * Appends generation-time suffixes to the tail of the prompt: the quiet
     * prompt (if any), the instruct-mode prompt line, the plain-text
     * impersonation line, and/or a forced "{{char}}:" name prefix.
     * Reads closure state: quiet_prompt, quietToLoud, quietName, isInstruct,
     * isImpersonate, isContinue, type, force_name2, promptBias, name1/name2.
     * @param {string} lastMesString - Current tail of the prompt being built.
     * @returns {string} The tail with all applicable suffixes appended.
     */
    function modifyLastPromptLine(lastMesString) {
        //#########QUIET PROMPT STUFF PT2##############

        // Add quiet generation prompt at depth 0
        if (quiet_prompt && quiet_prompt.length) {

            // here name1 is forced for all quiet prompts..why?
            const name = name1;
            //checks if we are in instruct, if so, formats the chat as such, otherwise just adds the quiet prompt
            const quietAppend = isInstruct ? formatInstructModeChat(name, quiet_prompt, false, true, '', name1, name2, false) : `\n${quiet_prompt}`;

            //This begins to fix quietPrompts (particularly /sysgen) for instruct
            //previously instruct input sequence was being appended to the last chat message w/o '\n'
            //and no output sequence was added after the input's content.
            //TODO: respect output_sequence vs last_output_sequence settings
            //TODO: decide how to prompt this to clarify who is talking 'Narrator', 'System', etc.
            if (isInstruct) {
                lastMesString += quietAppend; // + power_user.instruct.output_sequence + '\n';
            } else {
                lastMesString += quietAppend;
            }


            // Ross: bailing out early prevents quiet prompts from respecting other instruct prompt toggles
            // for sysgen, SD, and summary this is desirable as it prevents the AI from responding as char..
            // but for idle prompting, we want the flexibility of the other prompt toggles, and to respect them as per settings in the extension
            // need a detection for what the quiet prompt is being asked for...

            // Bail out early?
            if (!isInstruct && !quietToLoud) {
                return lastMesString;
            }
        }


        // Get instruct mode line
        if (isInstruct && !isContinue) {
            // Quiet-to-system generations speak as quietName (default 'System'); otherwise user/char
            const name = (quiet_prompt && !quietToLoud && !isImpersonate) ? (quietName ?? 'System') : (isImpersonate ? name1 : name2);
            const isQuiet = quiet_prompt && type == 'quiet';
            lastMesString += formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2, isQuiet, quietToLoud);
        }

        // Get non-instruct impersonation line
        if (!isInstruct && isImpersonate && !isContinue) {
            const name = name1;
            if (!lastMesString.endsWith('\n')) {
                lastMesString += '\n';
            }
            lastMesString += name + ':';
        }

        // Add character's name
        // Force name append on continue (if not continuing on user message or first message)
        const isContinuingOnFirstMessage = chat.length === 1 && isContinue;
        if (!isInstruct && force_name2 && !isContinuingOnFirstMessage) {
            if (!lastMesString.endsWith('\n')) {
                lastMesString += '\n';
            }
            if (!isContinue || !(chat[chat.length - 1]?.is_user)) {
                lastMesString += `${name2}:`;
            }
        }

        return lastMesString;
    }
+
+ // Clean up the already generated prompt for seamless addition
+ function cleanupPromptCache(promptCache) {
+ // Remove the first occurrance of character's name
+ if (promptCache.trimStart().startsWith(`${name2}:`)) {
+ promptCache = promptCache.replace(`${name2}:`, '').trimStart();
+ }
+
+ // Remove the first occurrance of prompt bias
+ if (promptCache.trimStart().startsWith(promptBias)) {
+ promptCache = promptCache.replace(promptBias, '');
+ }
+
+ // Add a space if prompt cache doesn't start with one
+ if (!/^\s/.test(promptCache) && !isInstruct) {
+ promptCache = ' ' + promptCache;
+ }
+
+ return promptCache;
+ }
+
    /**
     * Recursively trims the prompt until it fits within the max context size.
     * Drops unpinned example messages first, then the oldest chat entries,
     * re-measuring the token count after each removal. Mutates closure state
     * (count_exm_add, mesSend) as a side effect.
     * @returns {Promise<void>}
     */
    async function checkPromptSize() {
        console.debug('---checking Prompt size');
        setPromptString();
        // Flatten chat history (with injected extension prompts) into one string
        const jointMessages = mesSend.map((e) => `${e.extensionPrompts.join('')}${e.message}`).join('');
        const prompt = [
            beforeScenarioAnchor,
            storyString,
            afterScenarioAnchor,
            mesExmString,
            addChatsPreamble(addChatsSeparator(jointMessages)),
            '\n',
            modifyLastPromptLine(''),
            generatedPromptCache,
        ].join('').replace(/\r/gm, '');
        let thisPromptContextSize = await getTokenCountAsync(prompt, power_user.token_padding);

        if (thisPromptContextSize > this_max_context) { //if the prepared prompt is larger than the max context size...
            if (count_exm_add > 0) { // ..and we have example messages..
                count_exm_add--; // remove the example messages...
                await checkPromptSize(); // and try again...
            } else if (mesSend.length > 0) { // if the chat history is longer than 0
                mesSend.shift(); // remove the first (oldest) chat entry..
                await checkPromptSize(); // and check size again..
            } else {
                //end; nothing left to trim
                console.debug(`---mesSend.length = ${mesSend.length}`);
            }
        }
    }
+
+ if (generatedPromptCache.length > 0 && main_api !== 'openai') {
+ console.debug('---Generated Prompt Cache length: ' + generatedPromptCache.length);
+ await checkPromptSize();
+ } else {
+ console.debug('---calling setPromptString ' + generatedPromptCache.length);
+ setPromptString();
+ }
+
+ // For prompt bit itemization
+ let mesSendString = '';
+
    /**
     * Assembles the final text-completion prompt from the story string, anchors,
     * example messages, chat history, CFG prompt, prompt bias, and generated
     * prompt cache. Mutates the shared `mesSendString` and `generatedPromptCache`
     * closure state, and lets GENERATE_BEFORE_COMBINE_PROMPTS subscribers
     * override the flattening.
     * @param {boolean} isNegative - When true, builds the CFG negative prompt instead of the positive one.
     * @returns {string|undefined} The combined prompt; an empty string for Chat Completions;
     *  undefined when a negative prompt was requested but CFG is not in use.
     */
    function getCombinedPrompt(isNegative) {
        // Only return if the guidance scale doesn't exist or the value is 1
        // Also don't return if constructing the neutral prompt
        if (isNegative && !useCfgPrompt) {
            return;
        }

        // OAI has its own prompt manager. No need to do anything here
        if (main_api === 'openai') {
            return '';
        }

        // Deep clone so CFG/bias edits below don't touch the shared mesSend
        let finalMesSend = structuredClone(mesSend);

        if (useCfgPrompt) {
            const cfgPrompt = getCfgPrompt(cfgGuidanceScale, isNegative);
            if (cfgPrompt.value) {
                if (cfgPrompt.depth === 0) {
                    // Depth 0: append to the newest message, inserting a space if needed
                    finalMesSend[finalMesSend.length - 1].message +=
                        /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1))
                            ? cfgPrompt.value
                            : ` ${cfgPrompt.value}`;
                } else {
                    // TODO: Make all extension prompts use an array/splice method
                    const lengthDiff = mesSend.length - cfgPrompt.depth;
                    const cfgDepth = lengthDiff >= 0 ? lengthDiff : 0;
                    finalMesSend[cfgDepth].extensionPrompts.push(`${cfgPrompt.value}\n`);
                }
            }
        }

        // Add prompt bias after everything else
        // Always run with continue
        if (!isInstruct && !isImpersonate) {
            if (promptBias.trim().length !== 0) {
                finalMesSend[finalMesSend.length - 1].message +=
                    /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1))
                        ? promptBias.trimStart()
                        : ` ${promptBias.trimStart()}`;
            }
        }

        // Prune from prompt cache if it exists
        if (generatedPromptCache.length !== 0) {
            generatedPromptCache = cleanupPromptCache(generatedPromptCache);
        }

        // Flattens the multiple prompt objects to a string.
        const combine = () => {
            // Right now, everything is suffixed with a newline
            mesSendString = finalMesSend.map((e) => `${e.extensionPrompts.join('')}${e.message}`).join('');

            // add a custom dingus (if defined)
            mesSendString = addChatsSeparator(mesSendString);

            // add chat preamble
            mesSendString = addChatsPreamble(mesSendString);

            let combinedPrompt = beforeScenarioAnchor +
                storyString +
                afterScenarioAnchor +
                mesExmString +
                mesSendString +
                generatedPromptCache;

            // Carriage returns are never wanted in the final prompt
            combinedPrompt = combinedPrompt.replace(/\r/gm, '');

            if (power_user.collapse_newlines) {
                combinedPrompt = collapseNewlines(combinedPrompt);
            }

            return combinedPrompt;
        };

        // Mark messages that were inserted by extension-prompt injection
        // (injectedIndices count from the newest message, hence the reversal)
        finalMesSend.forEach((item, i) => {
            item.injected = injectedIndices.includes(finalMesSend.length - i - 1);
        });

        let data = {
            api: main_api,
            combinedPrompt: null,
            description,
            personality,
            persona,
            scenario,
            char: name2,
            user: name1,
            worldInfoBefore,
            worldInfoAfter,
            beforeScenarioAnchor,
            afterScenarioAnchor,
            storyString,
            mesExmString,
            mesSendString,
            finalMesSend,
            generatedPromptCache,
            main: system,
            jailbreak,
            naiPreamble: nai_settings.preamble,
        };

        // Before returning the combined prompt, give available context related information to all subscribers.
        eventSource.emitAndWait(event_types.GENERATE_BEFORE_COMBINE_PROMPTS, data);

        // If one or multiple subscribers return a value, forfeit the responsibility of flattening the context.
        return !data.combinedPrompt ? combine() : data.combinedPrompt;
    }
+
+ let finalPrompt = getCombinedPrompt(false);
+
+ const eventData = { prompt: finalPrompt, dryRun: dryRun };
+ await eventSource.emit(event_types.GENERATE_AFTER_COMBINE_PROMPTS, eventData);
+ finalPrompt = eventData.prompt;
+
+ let maxLength = Number(amount_gen); // how many tokens the AI will be requested to generate
+ let thisPromptBits = [];
+
+ let generate_data;
+ switch (main_api) {
+ case 'koboldhorde':
+ case 'kobold':
+ if (main_api == 'koboldhorde' && horde_settings.auto_adjust_response_length) {
+ maxLength = Math.min(maxLength, adjustedParams.maxLength);
+ maxLength = Math.max(maxLength, MIN_LENGTH); // prevent validation errors
+ }
+
+ generate_data = {
+ prompt: finalPrompt,
+ gui_settings: true,
+ max_length: maxLength,
+ max_context_length: max_context,
+ api_server,
+ };
+
+ if (preset_settings != 'gui') {
+ const isHorde = main_api == 'koboldhorde';
+ const presetSettings = koboldai_settings[koboldai_setting_names[preset_settings]];
+ const maxContext = (adjustedParams && horde_settings.auto_adjust_context_length) ? adjustedParams.maxContextLength : max_context;
+ generate_data = getKoboldGenerationData(finalPrompt, presetSettings, maxLength, maxContext, isHorde, type);
+ }
+ break;
+ case 'textgenerationwebui': {
+ const cfgValues = useCfgPrompt ? { guidanceScale: cfgGuidanceScale, negativePrompt: getCombinedPrompt(true) } : null;
+ generate_data = getTextGenGenerationData(finalPrompt, maxLength, isImpersonate, isContinue, cfgValues, type);
+ break;
+ }
+ case 'novel': {
+ const cfgValues = useCfgPrompt ? { guidanceScale: cfgGuidanceScale } : null;
+ const presetSettings = novelai_settings[novelai_setting_names[nai_settings.preset_settings_novel]];
+ generate_data = getNovelGenerationData(finalPrompt, presetSettings, maxLength, isImpersonate, isContinue, cfgValues, type);
+ break;
+ }
+ case 'openai': {
+ let [prompt, counts] = await prepareOpenAIMessages({
+ name2: name2,
+ charDescription: description,
+ charPersonality: personality,
+ Scenario: scenario,
+ worldInfoBefore: worldInfoBefore,
+ worldInfoAfter: worldInfoAfter,
+ extensionPrompts: extension_prompts,
+ bias: promptBias,
+ type: type,
+ quietPrompt: quiet_prompt,
+ quietImage: quietImage,
+ cyclePrompt: cyclePrompt,
+ systemPromptOverride: system,
+ jailbreakPromptOverride: jailbreak,
+ personaDescription: persona,
+ messages: oaiMessages,
+ messageExamples: oaiMessageExamples,
+ }, dryRun);
+ generate_data = { prompt: prompt };
+
+ // TODO: move these side-effects somewhere else, so this switch-case solely sets generate_data
+ // counts will return false if the user has not enabled the token breakdown feature
+ if (counts) {
+ parseTokenCounts(counts, thisPromptBits);
+ }
+
+ if (!dryRun) {
+ setInContextMessages(openai_messages_count, type);
+ }
+ break;
+ }
+ }
+
+ await eventSource.emit(event_types.GENERATE_AFTER_DATA, generate_data);
+
+ if (dryRun) {
+ generatedPromptCache = '';
+ return Promise.resolve();
+ }
+
    /**
     * Saves itemized prompt bits and calls the streaming or non-streaming generation API.
     * In the streaming path, may recurse into Generate() to satisfy tool calls.
     * @returns {Promise<String|string|undefined|object>} The generated message (a String object
     *  carrying `messageChunk`/`fromStream` metadata for streams), the raw API response for
     *  non-streaming requests, or undefined when generation was stopped (e.g. stealth tool calls).
     * @throws {Error|object} Error with message text, or Error with response JSON (OAI/Horde), or the actual response JSON (novel|textgenerationwebui|kobold)
     */
    async function finishGenerating() {
        if (power_user.console_log_prompts) {
            console.log(generate_data.prompt);
        }

        console.debug('rungenerate calling API');

        showStopButton();

        //set array object for prompt token itemization of this message
        let currentArrayEntry = Number(thisPromptBits.length - 1);
        let additionalPromptStuff = {
            ...thisPromptBits[currentArrayEntry],
            rawPrompt: generate_data.prompt || generate_data.input,
            mesId: getNextMessageId(type),
            allAnchors: await getAllExtensionPrompts(),
            chatInjects: injectedIndices?.map(index => arrMes[arrMes.length - index - 1])?.join('') || '',
            summarizeString: (extension_prompts['1_memory']?.value || ''),
            authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''),
            smartContextString: (extension_prompts['chromadb']?.value || ''),
            chatVectorsString: (extension_prompts['3_vectors']?.value || ''),
            dataBankVectorsString: (extension_prompts['4_vectors_data_bank']?.value || ''),
            worldInfoString: worldInfoString,
            storyString: storyString,
            beforeScenarioAnchor: beforeScenarioAnchor,
            afterScenarioAnchor: afterScenarioAnchor,
            examplesString: examplesString,
            mesSendString: mesSendString,
            generatedPromptCache: generatedPromptCache,
            promptBias: promptBias,
            finalPrompt: finalPrompt,
            charDescription: description,
            charPersonality: personality,
            scenarioText: scenario,
            this_max_context: this_max_context,
            padding: power_user.token_padding,
            main_api: main_api,
            instruction: main_api !== 'openai' && power_user.sysprompt.enabled ? substituteParams(power_user.prefer_character_prompt && system ? system : power_user.sysprompt.content) : '',
            userPersona: (power_user.persona_description_position == persona_description_positions.IN_PROMPT ? (persona || '') : ''),
            tokenizer: getFriendlyTokenizerName(main_api).tokenizerName || '',
            presetName: getPresetManager()?.getSelectedPresetName() || '',
        };

        //console.log(additionalPromptStuff);
        // Replace an existing itemization for this message (e.g. on swipe), otherwise append
        const itemizedIndex = itemizedPrompts.findIndex((item) => item.mesId === additionalPromptStuff.mesId);

        if (itemizedIndex !== -1) {
            itemizedPrompts[itemizedIndex] = additionalPromptStuff;
        }
        else {
            itemizedPrompts.push(additionalPromptStuff);
        }

        console.debug(`pushed prompt bits to itemizedPrompts array. Length is now: ${itemizedPrompts.length}`);

        if (isStreamingEnabled() && type !== 'quiet') {
            streamingProcessor = new StreamingProcessor(type, force_name2, generation_started, continue_mag);
            if (isContinue) {
                // Save reply does add cycle text to the prompt, so it's not needed here
                streamingProcessor.firstMessageText = '';
            }

            //
            // DAVE MOD - Sampler Random
            // Truely random TEMP / Other Samplers
            // Mod #3
            //
            // NOTE(review): non-upstream patch; appears to rewrite sampler settings
            // in generate_data — confirm semantics with the mod's author/source.

            generate_data=dave_sampler(generate_data);

            //
            // END MODE
            //

            streamingProcessor.generator = await sendStreamingRequest(type, generate_data);

            hideSwipeButtons();
            let getMessage = await streamingProcessor.generate();

            //
            // DAVE MOD #4
            // Stops double edit

            dave_skip_dave_edit=1;

            //
            //


            let messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false);

            if (isContinue) {
                // Prepend the text being continued so the full message is saved
                getMessage = continue_mag + getMessage;
            }


            //
            // DAVE MOD #5
            // getmeassge done it cleanup....
            //
            // NOTE(review): non-upstream post-processing hook; presumably edits the
            // finished message when dave_edit_flag/dave_signal are set — verify.

            if (dave_ACTIVE==1) {
                dave_skip_dave_edit=0;
                if (dave_edit_flag>0 || dave_signal != 0 ) {
                    if (dave_edit_flag>0) { ({getMessage,continue_mag,messageChunk}=dave_edit(getMessage,continue_mag,messageChunk,1)); }
                    else {dave_signal_go(1);}
                }
                dave_message_watch=getMessage;
            }

            //
            // END DAVE MOD
            //


            // Tool calls are only honored for streams that finished cleanly
            const isStreamFinished = streamingProcessor && !streamingProcessor.isStopped && streamingProcessor.isFinished;
            const isStreamWithToolCalls = streamingProcessor && Array.isArray(streamingProcessor.toolCalls) && streamingProcessor.toolCalls.length;
            if (canPerformToolCalls && isStreamFinished && isStreamWithToolCalls) {
                const lastMessage = chat[chat.length - 1];
                const hasToolCalls = ToolManager.hasToolCalls(streamingProcessor.toolCalls);
                // A placeholder-only message left behind by a pure tool call gets removed
                const shouldDeleteMessage = type !== 'swipe' && ['', '...'].includes(lastMessage?.mes) && ['', '...'].includes(streamingProcessor?.result);
                hasToolCalls && shouldDeleteMessage && await deleteLastMessage();
                const invocationResult = await ToolManager.invokeFunctionTools(streamingProcessor.toolCalls);
                const shouldStopGeneration = (!invocationResult.invocations.length && shouldDeleteMessage) || invocationResult.stealthCalls.length;
                if (hasToolCalls) {
                    if (shouldStopGeneration) {
                        if (Array.isArray(invocationResult.errors) && invocationResult.errors.length) {
                            ToolManager.showToolCallError(invocationResult.errors);
                        }
                        unblockGeneration(type);
                        generatedPromptCache = '';
                        streamingProcessor = null;
                        return;
                    }

                    // Recurse to generate the follow-up response to the tool results
                    streamingProcessor = null;
                    depth = depth + 1;
                    await ToolManager.saveFunctionToolInvocations(invocationResult.invocations);
                    return Generate('normal', { automatic_trigger, force_name2, quiet_prompt, quietToLoud, skipWIAN, force_chid, signal, quietImage, quietName, depth }, dryRun);
                }
            }

            if (isStreamFinished) {
                await streamingProcessor.onFinishStreaming(streamingProcessor.messageId, getMessage);
                streamingProcessor = null;
                triggerAutoContinue(messageChunk, isImpersonate);
                // String object keeps the API chain intact while carrying metadata
                return Object.defineProperties(new String(getMessage), {
                    'messageChunk': { value: messageChunk },
                    'fromStream': { value: true },
                });
            }
        } else {
            return await sendGenerationRequest(type, generate_data);
        }
    }
+
+ return finishGenerating().then(onSuccess, onError);
+
    /**
     * Handles the successful response from the generation API: cleans the message,
     * saves the reply, resolves tool calls (possibly recursing into Generate),
     * and optionally auto-swipes or auto-continues.
     * @param {object|String} data API response, or a stream result marked with `fromStream`
     * @returns {Promise<String|string|undefined>} The generated message, or undefined when generation was stopped
     * @throws {Error} Throws an error if the response data contains an error message
     */
    async function onSuccess(data) {
        if (!data) return;

        // Streaming path already produced the final result in finishGenerating
        if (data?.fromStream) {
            return data;
        }

        let messageChunk = '';

        // if an error was returned in data (textgenwebui), show it and throw it
        if (data.error) {
            unblockGeneration(type);
            generatedPromptCache = '';

            if (data?.response) {
                toastr.error(data.response, t`API Error`, { preventDuplicates: true });
            }
            throw new Error(data?.response);
        }

        //const getData = await response.json();
        let getMessage = extractMessageFromData(data);
        let title = extractTitleFromData(data);
        kobold_horde_model = title;

        const swipes = extractMultiSwipes(data, type);

        // DAVE MOD #6
        // NOTE(review): non-upstream patch; flag presumably suppresses a duplicate
        // edit pass inside cleanUpMessage — confirm with the mod's source.

        dave_skip_dave_edit=1;

        //
        // END DAVE MOD

        messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false);

        if (isContinue) {
            // Prepend the text being continued so the full message is saved
            getMessage = continue_mag + getMessage;
        }

        //Formating
        const displayIncomplete = type === 'quiet' && !quietToLoud;
        getMessage = cleanUpMessage(getMessage, isImpersonate, isContinue, displayIncomplete);

        // DAVE MOD #7
        // already did "getMessage" updates from "cleanup..." above.
        // NOTE(review): non-upstream post-processing hook, mirrors the streaming-path
        // hook in finishGenerating — verify intended behavior with the mod's source.

        if (dave_ACTIVE==1) {
            dave_skip_dave_edit=0;
            if (dave_edit_flag>0 || dave_signal != 0 ) {
                if (dave_edit_flag>0 ) { ({getMessage,continue_mag,messageChunk}=dave_edit(getMessage,continue_mag,messageChunk,2)); }
                else { dave_signal_go(2); }
            }
            dave_message_watch=getMessage;
        }

        //
        // END DAVE MOD

        if (isImpersonate) {
            // Impersonation puts the text into the user's input box instead of the chat
            $('#send_textarea').val(getMessage)[0].dispatchEvent(new Event('input', { bubbles: true }));
            generatedPromptCache = '';
            await eventSource.emit(event_types.IMPERSONATE_READY, getMessage);
        }
        else if (type == 'quiet') {
            unblockGeneration(type);
            return getMessage;
        }
        else {
            // Without streaming we'll be having a full message on continuation. Treat it as a last chunk.
            if (originalType !== 'continue') {
                ({ type, getMessage } = await saveReply(type, getMessage, false, title, swipes));
            }
            else {
                ({ type, getMessage } = await saveReply('appendFinal', getMessage, false, title, swipes));
            }

            // This relies on `saveReply` having been called to add the message to the chat, so it must be last.
            parseAndSaveLogprobs(data, continue_mag);
        }

        if (canPerformToolCalls) {
            const hasToolCalls = ToolManager.hasToolCalls(data);
            // A placeholder-only message left behind by a pure tool call gets removed
            const shouldDeleteMessage = type !== 'swipe' && ['', '...'].includes(getMessage);
            hasToolCalls && shouldDeleteMessage && await deleteLastMessage();
            const invocationResult = await ToolManager.invokeFunctionTools(data);
            const shouldStopGeneration = (!invocationResult.invocations.length && shouldDeleteMessage) || invocationResult.stealthCalls.length;
            if (hasToolCalls) {
                if (shouldStopGeneration) {
                    if (Array.isArray(invocationResult.errors) && invocationResult.errors.length) {
                        ToolManager.showToolCallError(invocationResult.errors);
                    }
                    unblockGeneration(type);
                    generatedPromptCache = '';
                    return;
                }

                // Recurse to generate the follow-up response to the tool results
                depth = depth + 1;
                await ToolManager.saveFunctionToolInvocations(invocationResult.invocations);
                return Generate('normal', { automatic_trigger, force_name2, quiet_prompt, quietToLoud, skipWIAN, force_chid, signal, quietImage, quietName, depth }, dryRun);
            }
        }

        if (type !== 'quiet') {
            playMessageSound();
        }

        const isAborted = abortController && abortController.signal.aborted;
        if (power_user.auto_swipe && !isAborted) {
            console.debug('checking for autoswipeblacklist on non-streaming message');
            // Returns true when the message matches at least `threshold` blacklist words
            function containsBlacklistedWords(getMessage, blacklist, threshold) {
                console.debug('checking blacklisted words');
                const regex = new RegExp(`\\b(${blacklist.join('|')})\\b`, 'gi');
                const matches = getMessage.match(regex) || [];
                return matches.length >= threshold;
            }

            const generatedTextFiltered = (getMessage) => {
                if (power_user.auto_swipe_blacklist_threshold) {
                    if (containsBlacklistedWords(getMessage, power_user.auto_swipe_blacklist, power_user.auto_swipe_blacklist_threshold)) {
                        console.debug('Generated text has blacklisted words');
                        return true;
                    }
                }

                return false;
            };
            if (generatedTextFiltered(getMessage)) {
                console.debug('swiping right automatically');
                is_send_press = false;
                swipe_right();
                // TODO: do we want to resolve after an auto-swipe?
                return;
            }
        }

        console.debug('/api/chats/save called by /Generate');
        await saveChatConditional();
        unblockGeneration(type);
        streamingProcessor = null;

        if (type !== 'quiet') {
            triggerAutoContinue(messageChunk, isImpersonate);
        }

        // Don't break the API chain that expects a single string in return
        return Object.defineProperty(new String(getMessage), 'messageChunk', { value: messageChunk });
    }
+
+ /**
+ * Exception handler for finishGenerating
+ * @param {Error|object} exception Error or response JSON
+ * @throws {Error|object} Re-throws the exception
+ */
+ function onError(exception) {
+ // if the response JSON was thrown (novel|textgenerationwebui|kobold), show the error message
+ if (typeof exception?.error?.message === 'string') {
+ toastr.error(exception.error.message, t`Text generation error`, { timeOut: 10000, extendedTimeOut: 20000 });
+ }
+
+ generatedPromptCache = '';
+
+ unblockGeneration(type);
+ console.log(exception);
+ streamingProcessor = null;
+ throw exception;
+ }
+}
+
+/**
+ * Stops the generation and any streaming if it is currently running.
+ */
+export function stopGeneration() {
+ let stopped = false;
+ if (streamingProcessor) {
+ streamingProcessor.onStopStreaming();
+ stopped = true;
+ }
+ if (abortController) {
+ abortController.abort('Clicked stop button');
+ hideStopButton();
+ stopped = true;
+ }
+ eventSource.emit(event_types.GENERATION_STOPPED);
+ return stopped;
+}
+
+/**
+ * Injects extension prompts into chat messages.
+ * @param {object[]} messages Array of chat messages
+ * @param {boolean} isContinue Whether the generation is a continuation. If true, the extension prompts of depth 0 are injected at position 1.
+ * @returns {Promise} Array of indices where the extension prompts were injected
+ */
+async function doChatInject(messages, isContinue) {
+ const injectedIndices = [];
+ let totalInsertedMessages = 0;
+ messages.reverse();
+
+ for (let i = 0; i <= MAX_INJECTION_DEPTH; i++) {
+ // Order of priority (most important go lower)
+ const roles = [extension_prompt_roles.SYSTEM, extension_prompt_roles.USER, extension_prompt_roles.ASSISTANT];
+ const names = {
+ [extension_prompt_roles.SYSTEM]: '',
+ [extension_prompt_roles.USER]: name1,
+ [extension_prompt_roles.ASSISTANT]: name2,
+ };
+ const roleMessages = [];
+ const separator = '\n';
+ const wrap = false;
+
+ for (const role of roles) {
+ const extensionPrompt = String(await getExtensionPrompt(extension_prompt_types.IN_CHAT, i, separator, role, wrap)).trimStart();
+ const isNarrator = role === extension_prompt_roles.SYSTEM;
+ const isUser = role === extension_prompt_roles.USER;
+ const name = names[role];
+
+ if (extensionPrompt) {
+ roleMessages.push({
+ name: name,
+ is_user: isUser,
+ mes: extensionPrompt,
+ extra: {
+ type: isNarrator ? system_message_types.NARRATOR : null,
+ },
+ });
+ }
+ }
+
+ if (roleMessages.length) {
+ const depth = isContinue && i === 0 ? 1 : i;
+ const injectIdx = depth + totalInsertedMessages;
+ messages.splice(injectIdx, 0, ...roleMessages);
+ totalInsertedMessages += roleMessages.length;
+ injectedIndices.push(...Array.from({ length: roleMessages.length }, (_, i) => injectIdx + i));
+ }
+ }
+
+ messages.reverse();
+ return injectedIndices;
+}
+
/**
 * Removes all custom-depth World Info entries from the extension prompts.
 * These entries have unique random key names, so stale ones must be flushed
 * to prevent duplication across generations.
 */
function flushWIDepthInjections() {
    const staleKeys = Object.keys(extension_prompts).filter((key) => key.startsWith('customDepthWI'));
    for (const key of staleKeys) {
        delete extension_prompts[key];
    }
}
+
+/**
+ * Unblocks the UI after a generation is complete.
+ * @param {string} [type] Generation type (optional)
+ */
+function unblockGeneration(type) {
+ // Don't unblock if a parallel stream is still running
+ if (type === 'quiet' && streamingProcessor && !streamingProcessor.isFinished) {
+ return;
+ }
+
+ is_send_press = false;
+ activateSendButtons();
+ showSwipeButtons();
+ setGenerationProgress(0);
+ flushEphemeralStoppingStrings();
+ flushWIDepthInjections();
+}
+
/**
 * Gets the chat index the next generated message will occupy.
 * A swipe replaces the last message, so it reuses that slot.
 * @param {string} type Generation type
 * @returns {number} Index of the next message in the chat array
 */
export function getNextMessageId(type) {
    // Strict comparison; `type` is always a string here
    return type === 'swipe' ? chat.length - 1 : chat.length;
}
+
+/**
+ * Determines if the message should be auto-continued.
+ * @param {string} messageChunk Current message chunk
+ * @param {boolean} isImpersonate Is the user impersonation
+ * @returns {boolean} Whether the message should be auto-continued
+ */
+export function shouldAutoContinue(messageChunk, isImpersonate) {
+
+ //
+ // DAVE MOD ; Will reset via separate controller.
+ // Mod #8
+ //
+ // dave_auto_continue OVERRIDE controller.
+ // 1=> Active.
+ //
+
+ if (dave_auto_continue==1) {
+
+ if ( dave_holdtext.length<2 ) {
+ dave_holdtext_count++;
+
+ if (dave_holdtext_count>4) {
+ dave_auto_continue=0;
+ console.debug('DAVE STOP: getnextmeessage function');
+ return false;
+ }
+ }
+
+ else {
+ dave_holdtext_count=0;
+ return true;
+ }
+
+ }
+
+ //
+ // END MOD
+ //
+
+
+ if (!power_user.auto_continue.enabled) {
+ console.debug('Auto-continue is disabled by user.');
+ return false;
+ }
+
+ if (typeof messageChunk !== 'string') {
+ console.debug('Not triggering auto-continue because message chunk is not a string');
+ return false;
+ }
+
+ if (isImpersonate) {
+ console.log('Continue for impersonation is not implemented yet');
+ return false;
+ }
+
+ if (is_send_press) {
+ console.debug('Auto-continue is disabled because a message is currently being sent.');
+ return false;
+ }
+
+ if (abortController && abortController.signal.aborted) {
+ console.debug('Auto-continue is not triggered because the generation was stopped.');
+ return false;
+ }
+
+ if (power_user.auto_continue.target_length <= 0) {
+ console.log('Auto-continue target length is 0, not triggering auto-continue');
+ return false;
+ }
+
+ if (main_api === 'openai' && !power_user.auto_continue.allow_chat_completions) {
+ console.log('Auto-continue for OpenAI is disabled by user.');
+ return false;
+ }
+
+ const textareaText = String($('#send_textarea').val());
+ const USABLE_LENGTH = 5;
+
+ if (textareaText.length > 0) {
+ console.log('Not triggering auto-continue because user input is not empty');
+ return false;
+ }
+
+ if (messageChunk.trim().length > USABLE_LENGTH && chat.length) {
+ const lastMessage = chat[chat.length - 1];
+ const messageLength = getTokenCount(lastMessage.mes);
+ const shouldAutoContinue = messageLength < power_user.auto_continue.target_length;
+
+ if (shouldAutoContinue) {
+ console.log(`Triggering auto-continue. Message tokens: ${messageLength}. Target tokens: ${power_user.auto_continue.target_length}. Message chunk: ${messageChunk}`);
+ return true;
+ } else {
+ console.log(`Not triggering auto-continue. Message tokens: ${messageLength}. Target tokens: ${power_user.auto_continue.target_length}`);
+ return false;
+ }
+ } else {
+ console.log('Last generated chunk was empty, not triggering auto-continue');
+ return false;
+ }
+}
+
+/**
+ * Triggers auto-continue if the message meets the criteria.
+ * @param {string} messageChunk Current message chunk
+ * @param {boolean} isImpersonate Is the user impersonation
+ */
+export function triggerAutoContinue(messageChunk, isImpersonate) {
+ if (selected_group) {
+ console.debug('Auto-continue is disabled for group chat');
+ return;
+ }
+
+ if (shouldAutoContinue(messageChunk, isImpersonate)) {
+ $('#option_continue').trigger('click');
+ }
+}
+
/**
 * Gets the bias strings for the current user input and generation type.
 * @param {string} textareaText User input text
 * @param {string} type Generation type
 * @returns {{messageBias: string, promptBias: string, isUserPromptBias: boolean}} Extracted biases with macros substituted
 */
export function getBiasStrings(textareaText, type) {
    if (type === 'impersonate' || type === 'continue') {
        return { messageBias: '', promptBias: '', isUserPromptBias: false };
    }

    let promptBias = '';
    let messageBias = extractMessageBias(textareaText);

    // With no user input, inherit the bias of the most recent relevant message
    if (!textareaText) {
        const isRelevant = (mes) => mes && (mes.is_user || mes.is_system || mes.extra?.type === system_message_types.NARRATOR);
        for (let i = chat.length - 1; i >= 0; i--) {
            // The message being swiped away doesn't count
            if (type === 'swipe' && i === chat.length - 1) {
                continue;
            }
            const mes = chat[i];
            if (isRelevant(mes)) {
                if (mes.extra?.bias?.trim()?.length > 0) {
                    promptBias = mes.extra.bias;
                }
                break;
            }
        }
    }

    // Priority: explicit message bias > inherited bias > persistent user bias
    promptBias = messageBias || promptBias || power_user.user_prompt_bias || '';
    const isUserPromptBias = promptBias === power_user.user_prompt_bias;

    // Substitute params for everything
    return {
        messageBias: substituteParams(messageBias),
        promptBias: substituteParams(promptBias),
        isUserPromptBias,
    };
}
+
+/**
+ * @param {Object} chatItem Message history item.
+ * @param {boolean} isInstruct Whether instruct mode is enabled.
+ * @param {boolean|number} forceOutputSequence Whether to force the first/last output sequence for instruct mode.
+ */
+function formatMessageHistoryItem(chatItem, isInstruct, forceOutputSequence) {
+ const isNarratorType = chatItem?.extra?.type === system_message_types.NARRATOR;
+ const characterName = chatItem?.name ? chatItem.name : name2;
+ const itemName = chatItem.is_user ? chatItem['name'] : characterName;
+ const shouldPrependName = !isNarratorType;
+
+ // Don't include a name if it's empty
+ let textResult = chatItem?.name && shouldPrependName ? `${itemName}: ${chatItem.mes}\n` : `${chatItem.mes}\n`;
+
+ if (isInstruct) {
+ textResult = formatInstructModeChat(itemName, chatItem.mes, chatItem.is_user, isNarratorType, chatItem.force_avatar, name1, name2, forceOutputSequence);
+ }
+
+ return textResult;
+}
+
+/**
+ * Removes all {{macros}} from a string.
+ * @param {string} str String to remove macros from.
+ * @returns {string} String with macros removed.
+ */
+export function removeMacros(str) {
+ return (str ?? '').replace(/\{\{[\s\S]*?\}\}/gm, '').trim();
+}
+
+/**
+ * Inserts a user message into the chat history.
+ * @param {string} messageText Message text.
+ * @param {string} messageBias Message bias.
+ * @param {number} [insertAt] Optional index to insert the message at.
+ * @param {boolean} [compact] Send as a compact display message.
+ * @param {string} [name] Name of the user sending the message. Defaults to name1.
+ * @param {string} [avatar] Avatar of the user sending the message. Defaults to user_avatar.
+ * @returns {Promise} A promise that resolves to the message when it is inserted.
+ */
+export async function sendMessageAsUser(messageText, messageBias, insertAt = null, compact = false, name = name1, avatar = user_avatar) {
+ messageText = getRegexedString(messageText, regex_placement.USER_INPUT);
+
+ const message = {
+ name: name,
+ is_user: true,
+ is_system: false,
+ send_date: getMessageTimeStamp(),
+ mes: substituteParams(messageText),
+ extra: {
+ isSmallSys: compact,
+ },
+ };
+
+ if (power_user.message_token_count_enabled) {
+ message.extra.token_count = await getTokenCountAsync(message.mes, 0);
+ }
+
+ // Lock user avatar to a persona.
+ if (avatar in power_user.personas) {
+ message.force_avatar = getUserAvatar(avatar);
+ }
+
+ if (messageBias) {
+ message.extra.bias = messageBias;
+ message.mes = removeMacros(message.mes);
+ }
+
+ await populateFileAttachment(message);
+ statMesProcess(message, 'user', characters, this_chid, '');
+
+ if (typeof insertAt === 'number' && insertAt >= 0 && insertAt <= chat.length) {
+ chat.splice(insertAt, 0, message);
+ await saveChatConditional();
+ await eventSource.emit(event_types.MESSAGE_SENT, insertAt);
+ await reloadCurrentChat();
+ await eventSource.emit(event_types.USER_MESSAGE_RENDERED, insertAt);
+ } else {
+ chat.push(message);
+ const chat_id = (chat.length - 1);
+ await eventSource.emit(event_types.MESSAGE_SENT, chat_id);
+ addOneMessage(message);
+ await eventSource.emit(event_types.USER_MESSAGE_RENDERED, chat_id);
+ await saveChatConditional();
+ }
+
+ return message;
+}
+
+/**
+ * Gets the maximum usable context size for the current API.
+ * @param {number|null} overrideResponseLength Optional override for the response length.
+ * @returns {number} Maximum usable context size.
+ */
+export function getMaxContextSize(overrideResponseLength = null) {
+ if (typeof overrideResponseLength !== 'number' || overrideResponseLength <= 0 || isNaN(overrideResponseLength)) {
+ overrideResponseLength = null;
+ }
+
+ let this_max_context = 1487;
+ if (main_api == 'kobold' || main_api == 'koboldhorde' || main_api == 'textgenerationwebui') {
+ this_max_context = (max_context - (overrideResponseLength || amount_gen));
+ }
+ if (main_api == 'novel') {
+ this_max_context = Number(max_context);
+ if (nai_settings.model_novel.includes('clio')) {
+ this_max_context = Math.min(max_context, 8192);
+ }
+ if (nai_settings.model_novel.includes('kayra')) {
+ this_max_context = Math.min(max_context, 8192);
+
+ const subscriptionLimit = getKayraMaxContextTokens();
+ if (typeof subscriptionLimit === 'number' && this_max_context > subscriptionLimit) {
+ this_max_context = subscriptionLimit;
+ console.log(`NovelAI subscription limit reached. Max context size is now ${this_max_context}`);
+ }
+ }
+ if (nai_settings.model_novel.includes('erato')) {
+ // subscriber limits coming soon
+ this_max_context = Math.min(max_context, 8192);
+
+ // Added special tokens and whatnot
+ this_max_context -= 10;
+ }
+
+ this_max_context = this_max_context - (overrideResponseLength || amount_gen);
+ }
+ if (main_api == 'openai') {
+ this_max_context = oai_settings.openai_max_context - (overrideResponseLength || oai_settings.openai_max_tokens);
+ }
+ return this_max_context;
+}
+
/**
 * Parses OpenAI-style per-section token counts into a prompt-bits itemization entry.
 * @param {object} counts Mapping of prompt section names to token counts.
 * @param {any[]} thisPromptBits Array that receives the parsed itemization entry (mutated in place).
 */
function parseTokenCounts(counts, thisPromptBits) {
    /**
     * Sums all numeric arguments, skipping values that do not coerce to a number.
     * @param {...any} numbers Values to sum.
     * @returns {number} Sum of the numeric values.
     */
    function getSum(...numbers) {
        return numbers.map(x => Number(x)).filter(x => !Number.isNaN(x)).reduce((acc, val) => acc + val, 0);
    }

    // Bug fix: the values must be spread into getSum. Passing the array as a single
    // rest argument coerced the whole array with Number(), which is NaN whenever there
    // is more than one entry — so the total was silently reported as 0.
    const total = getSum(...Object.values(counts));

    thisPromptBits.push({
        oaiStartTokens: (counts?.start + counts?.controlPrompts) || 0,
        oaiPromptTokens: getSum(counts?.prompt, counts?.charDescription, counts?.charPersonality, counts?.scenario) || 0,
        oaiBiasTokens: counts?.bias || 0,
        oaiNudgeTokens: counts?.nudge || 0,
        oaiJailbreakTokens: counts?.jailbreak || 0,
        oaiImpersonateTokens: counts?.impersonate || 0,
        oaiExamplesTokens: (counts?.dialogueExamples + counts?.examples) || 0,
        oaiConversationTokens: (counts?.conversation + counts?.chatHistory) || 0,
        oaiNsfwTokens: counts?.nsfw || 0,
        oaiMainTokens: counts?.main || 0,
        oaiTotalTokens: total,
    });
}
+
/**
 * Prepends the NovelAI preamble to the assembled chat history when applicable.
 * @param {string} mesSendString Assembled chat history string.
 * @returns {string} History string, prefixed with the substituted preamble for the NovelAI API.
 */
function addChatsPreamble(mesSendString) {
    if (main_api === 'novel') {
        return substituteParams(nai_settings.preamble) + '\n' + mesSendString;
    }
    return mesSendString;
}
+
/**
 * Prepends the configured chat-start separator to the assembled chat history, if one is set.
 * @param {string} mesSendString Assembled chat history string.
 * @returns {string} History string, prefixed with the substituted separator when configured.
 */
function addChatsSeparator(mesSendString) {
    return power_user.context.chat_start
        ? substituteParams(power_user.context.chat_start + '\n') + mesSendString
        : mesSendString;
}
+
/**
 * Duplicates the currently selected character after user confirmation.
 * Shows a toast and refreshes the character list on success.
 * @returns {Promise<string>} Always resolves to an empty string (slash-command contract).
 */
async function duplicateCharacter() {
    if (!this_chid) {
        toastr.warning(t`You must first select a character to duplicate!`);
        return '';
    }

    const confirmMessage = $(await renderTemplateAsync('duplicateConfirm'));
    const confirm = await callGenericPopup(confirmMessage, POPUP_TYPE.CONFIRM);

    if (!confirm) {
        console.log('User cancelled duplication');
        return '';
    }

    const body = { avatar_url: characters[this_chid].avatar };
    const response = await fetch('/api/characters/duplicate', {
        method: 'POST',
        headers: getRequestHeaders(),
        body: JSON.stringify(body),
    });

    if (response.ok) {
        toastr.success(t`Character Duplicated`);
        const data = await response.json();
        await eventSource.emit(event_types.CHARACTER_DUPLICATED, { oldAvatar: body.avatar_url, newAvatar: data.path });
        await getCharacters();
    } else {
        // Previously a failed request was silently ignored; surface it to the user.
        console.error('Character duplication failed:', response.status, response.statusText);
        toastr.error(t`Failed to duplicate the character.`);
    }

    return '';
}
+
/**
 * Computes token counts and derived statistics for a single stored itemized prompt,
 * producing the parameters object consumed by the prompt itemization templates.
 * @param {object[]} itemizedPrompts Array of stored itemized prompt records.
 * @param {number} thisPromptSet Index into itemizedPrompts of the record to analyze.
 * @param {number} incomingMesId Chat message ID; used to read model/API metadata from `chat`.
 * @returns {Promise<object>} Template parameters (token counts, percentages, friendly names).
 */
export async function itemizedParams(itemizedPrompts, thisPromptSet, incomingMesId) {
    // Token counts for each stored prompt section, plus pass-through metadata.
    const params = {
        charDescriptionTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].charDescription),
        charPersonalityTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].charPersonality),
        scenarioTextTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].scenarioText),
        userPersonaStringTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].userPersona),
        worldInfoStringTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].worldInfoString),
        allAnchorsTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].allAnchors),
        summarizeStringTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].summarizeString),
        authorsNoteStringTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].authorsNoteString),
        smartContextStringTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].smartContextString),
        beforeScenarioAnchorTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].beforeScenarioAnchor),
        afterScenarioAnchorTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].afterScenarioAnchor),
        zeroDepthAnchorTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].zeroDepthAnchor), // TODO: unused
        thisPrompt_padding: itemizedPrompts[thisPromptSet].padding,
        this_main_api: itemizedPrompts[thisPromptSet].main_api,
        chatInjects: await getTokenCountAsync(itemizedPrompts[thisPromptSet].chatInjects),
        chatVectorsStringTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].chatVectorsString),
        dataBankVectorsStringTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].dataBankVectorsString),
        modelUsed: chat[incomingMesId]?.extra?.model,
        apiUsed: chat[incomingMesId]?.extra?.api,
        presetName: itemizedPrompts[thisPromptSet].presetName || t`(Unknown)`,
    };

    // Resolves an API value to its human-readable <option> label from the API selector.
    const getFriendlyName = (value) => $(`#rm_api_block select option[value="${value}"]`).first().text() || value;

    if (params.apiUsed) {
        params.apiUsed = getFriendlyName(params.apiUsed);
    }

    if (params.this_main_api) {
        params.mainApiFriendlyName = getFriendlyName(params.this_main_api);
    }

    if (params.chatInjects) {
        // NOTE(review): params.ActualChatHistoryTokens is still undefined at this point,
        // so this subtraction yields NaN and is unconditionally overwritten in both
        // branches below. Presumably this adjustment was meant to run after the history
        // token count is computed — confirm intent before relying on it.
        params.ActualChatHistoryTokens = params.ActualChatHistoryTokens - params.chatInjects;
    }

    if (params.this_main_api == 'openai') {
        //for OAI API
        //console.log('-- Counting OAI Tokens');

        // Chat-completion prompts reuse the token counts recorded at generation time.
        //params.finalPromptTokens = itemizedPrompts[thisPromptSet].oaiTotalTokens;
        params.oaiMainTokens = itemizedPrompts[thisPromptSet].oaiMainTokens;
        params.oaiStartTokens = itemizedPrompts[thisPromptSet].oaiStartTokens;
        params.ActualChatHistoryTokens = itemizedPrompts[thisPromptSet].oaiConversationTokens;
        params.examplesStringTokens = itemizedPrompts[thisPromptSet].oaiExamplesTokens;
        // Scenario anchors are counted separately, so remove them from the prompt bucket;
        // example messages are folded back in.
        params.oaiPromptTokens = itemizedPrompts[thisPromptSet].oaiPromptTokens - (params.afterScenarioAnchorTokens + params.beforeScenarioAnchorTokens) + params.examplesStringTokens;
        params.oaiBiasTokens = itemizedPrompts[thisPromptSet].oaiBiasTokens;
        params.oaiJailbreakTokens = itemizedPrompts[thisPromptSet].oaiJailbreakTokens;
        params.oaiNudgeTokens = itemizedPrompts[thisPromptSet].oaiNudgeTokens;
        params.oaiImpersonateTokens = itemizedPrompts[thisPromptSet].oaiImpersonateTokens;
        params.oaiNsfwTokens = itemizedPrompts[thisPromptSet].oaiNsfwTokens;
        // Total = sum of every counted section (character fields/anchors intentionally excluded).
        params.finalPromptTokens =
            params.oaiStartTokens +
            params.oaiPromptTokens +
            params.oaiMainTokens +
            params.oaiNsfwTokens +
            params.oaiBiasTokens +
            params.oaiImpersonateTokens +
            params.oaiJailbreakTokens +
            params.oaiNudgeTokens +
            params.ActualChatHistoryTokens +
            //charDescriptionTokens +
            //charPersonalityTokens +
            //allAnchorsTokens +
            params.worldInfoStringTokens +
            params.beforeScenarioAnchorTokens +
            params.afterScenarioAnchorTokens;
        // Max context size - max completion tokens
        params.thisPrompt_max_context = (oai_settings.openai_max_context - oai_settings.openai_max_tokens);

        //console.log('-- applying % on OAI tokens');
        params.oaiStartTokensPercentage = ((params.oaiStartTokens / (params.finalPromptTokens)) * 100).toFixed(2);
        params.storyStringTokensPercentage = (((params.afterScenarioAnchorTokens + params.beforeScenarioAnchorTokens + params.oaiPromptTokens) / (params.finalPromptTokens)) * 100).toFixed(2);
        params.ActualChatHistoryTokensPercentage = ((params.ActualChatHistoryTokens / (params.finalPromptTokens)) * 100).toFixed(2);
        params.promptBiasTokensPercentage = ((params.oaiBiasTokens / (params.finalPromptTokens)) * 100).toFixed(2);
        params.worldInfoStringTokensPercentage = ((params.worldInfoStringTokens / (params.finalPromptTokens)) * 100).toFixed(2);
        params.allAnchorsTokensPercentage = ((params.allAnchorsTokens / (params.finalPromptTokens)) * 100).toFixed(2);
        params.selectedTokenizer = getFriendlyTokenizerName(params.this_main_api).tokenizerName;
        params.oaiSystemTokens = params.oaiImpersonateTokens + params.oaiJailbreakTokens + params.oaiNudgeTokens + params.oaiStartTokens + params.oaiNsfwTokens + params.oaiMainTokens;
        params.oaiSystemTokensPercentage = ((params.oaiSystemTokens / (params.finalPromptTokens)) * 100).toFixed(2);
    } else {
        //for non-OAI APIs
        //console.log('-- Counting non-OAI Tokens');
        // Text-completion prompts are recounted from the stored strings.
        params.finalPromptTokens = await getTokenCountAsync(itemizedPrompts[thisPromptSet].finalPrompt);
        params.storyStringTokens = await getTokenCountAsync(itemizedPrompts[thisPromptSet].storyString) - params.worldInfoStringTokens;
        params.examplesStringTokens = await getTokenCountAsync(itemizedPrompts[thisPromptSet].examplesString);
        params.mesSendStringTokens = await getTokenCountAsync(itemizedPrompts[thisPromptSet].mesSendString);
        // History = sent messages minus embedded anchors (scenario anchors counted elsewhere), plus padding.
        params.ActualChatHistoryTokens = params.mesSendStringTokens - (params.allAnchorsTokens - (params.beforeScenarioAnchorTokens + params.afterScenarioAnchorTokens)) + power_user.token_padding;
        params.instructionTokens = await getTokenCountAsync(itemizedPrompts[thisPromptSet].instruction);
        params.promptBiasTokens = await getTokenCountAsync(itemizedPrompts[thisPromptSet].promptBias);

        params.totalTokensInPrompt =
            params.storyStringTokens + //chardefs total
            params.worldInfoStringTokens +
            params.examplesStringTokens + // example messages
            params.ActualChatHistoryTokens + //chat history
            params.allAnchorsTokens + // AN and/or legacy anchors
            //afterScenarioAnchorTokens + //only counts if AN is set to 'after scenario'
            //zeroDepthAnchorTokens + //same as above, even if AN not on 0 depth
            params.promptBiasTokens; //{{}}
        //- thisPrompt_padding; //not sure this way of calculating is correct, but the math results in same value as 'finalPrompt'
        params.thisPrompt_max_context = itemizedPrompts[thisPromptSet].this_max_context;
        params.thisPrompt_actual = params.thisPrompt_max_context - params.thisPrompt_padding;

        //console.log('-- applying % on non-OAI tokens');
        params.storyStringTokensPercentage = ((params.storyStringTokens / (params.totalTokensInPrompt)) * 100).toFixed(2);
        params.ActualChatHistoryTokensPercentage = ((params.ActualChatHistoryTokens / (params.totalTokensInPrompt)) * 100).toFixed(2);
        params.promptBiasTokensPercentage = ((params.promptBiasTokens / (params.totalTokensInPrompt)) * 100).toFixed(2);
        params.worldInfoStringTokensPercentage = ((params.worldInfoStringTokens / (params.totalTokensInPrompt)) * 100).toFixed(2);
        params.allAnchorsTokensPercentage = ((params.allAnchorsTokens / (params.totalTokensInPrompt)) * 100).toFixed(2);
        params.selectedTokenizer = itemizedPrompts[thisPromptSet]?.tokenizer || getFriendlyTokenizerName(params.this_main_api).tokenizerName;
    }
    return params;
}
+
/**
 * Finds the index of the itemized prompt record matching a message ID.
 * Side effects: on a match, sets the module-level `PromptArrayItemForRawPromptDisplay`;
 * while scanning, records the last earlier entry that has a raw prompt into
 * `priorPromptArrayItemForRawPromptDisplay` (used for the prompt diff feature).
 * @param {object[]} itemizedPrompts Array of stored itemized prompt records.
 * @param {number} incomingMesId Message ID to look for.
 * @returns {number|undefined} Index of the matching record, or undefined if none matches.
 */
export function findItemizedPromptSet(itemizedPrompts, incomingMesId) {
    let thisPromptSet = undefined;

    for (let i = 0; i < itemizedPrompts.length; i++) {
        console.log(`looking for ${incomingMesId} vs ${itemizedPrompts[i].mesId}`);
        if (itemizedPrompts[i].mesId === incomingMesId) {
            console.log(`found matching mesID ${i}`);
            thisPromptSet = i;
            PromptArrayItemForRawPromptDisplay = i;
            console.log(`wanting to raw display of ArrayItem: ${PromptArrayItemForRawPromptDisplay} which is mesID ${incomingMesId}`);
            console.log(itemizedPrompts[thisPromptSet]);
            break;
        } else if (itemizedPrompts[i].rawPrompt) {
            priorPromptArrayItemForRawPromptDisplay = i;
        }
    }
    return thisPromptSet;
}
+
/**
 * Shows the prompt itemization popup for a given message, with controls to view
 * the raw prompt, diff it against the previous prompt, and copy it to the clipboard.
 * @param {object[]} itemizedPrompts Array of stored itemized prompt records.
 * @param {number|string} requestedMesId Message ID to itemize (coerced to a number).
 * @returns {Promise<null|void>} null when no stored record matches the message ID.
 */
async function promptItemize(itemizedPrompts, requestedMesId) {
    console.log('PROMPT ITEMIZE ENTERED');
    var incomingMesId = Number(requestedMesId);
    console.debug(`looking for MesId ${incomingMesId}`);
    // Also updates the module-level raw-prompt display indices as a side effect.
    var thisPromptSet = findItemizedPromptSet(itemizedPrompts, incomingMesId);

    if (thisPromptSet === undefined) {
        console.log(`couldnt find the right mesId. looked for ${incomingMesId}`);
        console.log(itemizedPrompts);
        return null;
    }

    const params = await itemizedParams(itemizedPrompts, thisPromptSet, incomingMesId);
    // Chat-completion raw prompts are arrays of {content} messages; flatten them to plain text.
    const flatten = (rawPrompt) => Array.isArray(rawPrompt) ? rawPrompt.map(x => x.content).join('\n') : rawPrompt;

    // Chat-completion APIs use the chat itemization template; everything else uses the text one.
    const template = params.this_main_api == 'openai'
        ? await renderTemplateAsync('itemizationChat', params)
        : await renderTemplateAsync('itemizationText', params);

    const popup = new Popup(template, POPUP_TYPE.TEXT);

    /** @type {HTMLElement} */
    const diffPrevPrompt = popup.dlg.querySelector('#diffPrevPrompt');
    // Only offer the diff button when an earlier prompt with raw text was found.
    // NOTE(review): this is falsy when the prior index is 0 — confirm whether index 0 should qualify.
    if (priorPromptArrayItemForRawPromptDisplay) {
        diffPrevPrompt.style.display = '';
        diffPrevPrompt.addEventListener('click', function () {
            const dmp = new DiffMatchPatch();
            const text1 = flatten(itemizedPrompts[priorPromptArrayItemForRawPromptDisplay].rawPrompt);
            const text2 = flatten(itemizedPrompts[PromptArrayItemForRawPromptDisplay].rawPrompt);

            dmp.Diff_Timeout = 2.0;

            const d = dmp.diff_main(text1, text2);
            let ds = dmp.diff_prettyHtml(d);
            // make it readable
            ds = ds.replaceAll('background:#e6ffe6;', 'background:#b9f3b9; color:black;');
            ds = ds.replaceAll('background:#ffe6e6;', 'background:#f5b4b4; color:black;');
            ds = ds.replaceAll('¶', '');
            const container = document.createElement('div');
            container.innerHTML = DOMPurify.sanitize(ds);
            const rawPromptWrapper = document.getElementById('rawPromptWrapper');
            rawPromptWrapper.replaceChildren(container);
            $('#rawPromptPopup').slideToggle();
        });
    } else {
        diffPrevPrompt.style.display = 'none';
    }
    popup.dlg.querySelector('#copyPromptToClipboard').addEventListener('pointerup', async function () {
        let rawPrompt = itemizedPrompts[PromptArrayItemForRawPromptDisplay].rawPrompt;
        let rawPromptValues = rawPrompt;

        if (Array.isArray(rawPrompt)) {
            rawPromptValues = rawPrompt.map(x => x.content).join('\n');
        }

        await copyText(rawPromptValues);
        toastr.info(t`Copied!`);
    });

    popup.dlg.querySelector('#showRawPrompt').addEventListener('click', function () {
        //console.log(itemizedPrompts[PromptArrayItemForRawPromptDisplay].rawPrompt);
        console.log(PromptArrayItemForRawPromptDisplay);
        console.log(itemizedPrompts);
        console.log(itemizedPrompts[PromptArrayItemForRawPromptDisplay].rawPrompt);

        const rawPrompt = flatten(itemizedPrompts[PromptArrayItemForRawPromptDisplay].rawPrompt);

        //let DisplayStringifiedPrompt = JSON.stringify(itemizedPrompts[PromptArrayItemForRawPromptDisplay].rawPrompt).replace(/\n+/g, ' ');
        const rawPromptWrapper = document.getElementById('rawPromptWrapper');
        // innerText (not innerHTML) so the raw prompt is shown verbatim, not interpreted as markup.
        rawPromptWrapper.innerText = rawPrompt;
        $('#rawPromptPopup').slideToggle();
    });

    await popup.show();
}
+
/**
 * Marks the last chat message that is still inside the generation context
 * with the 'lastInContext' class, and records its ID in chat metadata.
 * @param {number} msgInContextCount Number of messages included in the prompt context.
 * @param {string} type Generation type (e.g. 'swipe', 'regenerate', 'continue').
 */
function setInContextMessages(msgInContextCount, type) {
    $('#chat .mes').removeClass('lastInContext');

    // These generation types reuse the last message, so the boundary shifts by one.
    if (type === 'swipe' || type === 'regenerate' || type === 'continue') {
        msgInContextCount++;
    }

    const lastMessageBlock = $('#chat .mes:not([is_system="true"])').eq(-msgInContextCount);
    lastMessageBlock.addClass('lastInContext');

    if (lastMessageBlock.length === 0) {
        // Bug fix: the attribute selector was missing its closing ']', which made
        // jQuery throw "unrecognized expression" instead of marking the first message.
        const firstMessageId = getFirstDisplayedMessageId();
        $(`#chat .mes[mesid="${firstMessageId}"]`).addClass('lastInContext');
    }

    // Update last id to chat. No metadata save on purpose, gets hopefully saved via another call
    const lastMessageId = Math.max(0, chat.length - msgInContextCount);
    chat_metadata['lastInContextMessageId'] = lastMessageId;
}
+
+/**
+ * Sends a non-streaming request to the API.
+ * @param {string} type Generation type
+ * @param {object} data Generation data
+ * @returns {Promise