Mirror of <https://github.com/invoke-ai/InvokeAI.git>

Compare commits: ryan/model...v5.4.3 (16 commits)
| SHA1 |
| --- |
| b7132ce9e7 |
| 90f30e7748 |
| 6b86a66bc7 |
| aa97e626e9 |
| c90736093f |
| 0bff4ace1b |
| 5eb382074e |
| 46aa930526 |
| 3305bad0c2 |
| 13703d8f55 |
| 60d838d0a5 |
| 2a157a44bf |
| d61b5833c2 |
| c094838c6a |
| 2d334c8dd8 |
| a6be26e174 |
@@ -50,7 +50,7 @@ Applications are built on top of the invoke framework. They should construct `in

### Web UI

-The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/frontend` and the backend code is found in `/ldm/invoke/app/api_app.py` and `/ldm/invoke/app/api/`. The code is further organized as such:
+The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/invokeai/frontend` and the backend code is found in `/invokeai/app/api_app.py` and `/invokeai/app/api/`. The code is further organized as such:

| Component | Description |
| --- | --- |
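As a minimal sketch of how an HTTP API and a Socket.IO server can be combined into one ASGI application, assuming the `fastapi` and `python-socketio` packages: the route path and event handler below are illustrative only, not InvokeAI's actual wiring.

```python
import socketio
from fastapi import FastAPI

api = FastAPI()


@api.get("/api/v1/health")  # hypothetical route, for illustration only
async def health() -> dict[str, str]:
    return {"status": "ok"}


# Wrap the FastAPI app so HTTP routes and Socket.IO traffic share one ASGI entry point.
sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*")
app = socketio.ASGIApp(sio, other_asgi_app=api, socketio_path="/ws/socket.io")


@sio.event
async def connect(sid: str, environ: dict) -> None:
    # Clients connect here and can later receive progress events pushed by the backend.
    print(f"client connected: {sid}")
```

Run with `uvicorn module_name:app` to serve both the HTTP routes and the Socket.IO endpoint from a single process.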
@@ -62,7 +62,7 @@ The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.t

### CLI

-The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/ldm/invoke/app/cli_app.py`.
+The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/invokeai/frontend/cli`.

## Invoke
@@ -70,7 +70,7 @@ The Invoke framework provides the interface to the underlying AI systems and is

### Invoker

-The invoker (`/ldm/invoke/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services:
+The invoker (`/invokeai/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services:

- **invocation services**, which are used by invocations to interact with core functionality.
- **invoker services**, which are used by the invoker to manage sessions and manage the invocation queue.
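A purely illustrative sketch of that split; the class names and fields below are hypothetical stand-ins rather than the framework's real types.

```python
from dataclasses import dataclass
from typing import Any


@dataclass
class InvocationServices:
    """Handed to invocations while they execute (e.g. image storage, model access)."""

    images: Any
    models: Any


@dataclass
class InvokerServices:
    """Used by the invoker itself to track sessions and queue pending invocations."""

    session_store: Any
    invocation_queue: Any
```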
@@ -82,12 +82,12 @@ The session graph does not support looping. This is left as an application probl

### Invocations

-Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/ldm/invoke/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.
+Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/invokeai/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.
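As a rough sketch of the shape an invocation takes, following the pattern in the implementation guide: the base classes, field helpers, and module paths below are assumptions and vary between InvokeAI versions.

```python
from typing import Literal

from pydantic import Field

# Assumed imports; the real base classes live under the invocations package.
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext


class IntOutput(BaseInvocationOutput):
    """Output carrying a single integer."""

    type: Literal["int_output"] = "int_output"
    value: int = Field(description="The resulting integer")


class AddInvocation(BaseInvocation):
    """Adds two numbers. Discovered automatically because it subclasses BaseInvocation."""

    type: Literal["add"] = "add"
    a: int = Field(default=0, description="The first number")
    b: int = Field(default=0, description="The second number")

    def invoke(self, context: InvocationContext) -> IntOutput:
        return IntOutput(value=self.a + self.b)
```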
### Services

-Services provide invocations access to AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/ldm/invoke/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).
+Services provide invocations access to AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/invokeai/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).
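A minimal sketch of that pattern, using a hypothetical image-storage service (the names are illustrative, not the real InvokeAI interfaces): the interface is an abstract base class, a lightweight local implementation lives alongside it, and the heavier alternative defers its expensive import until it is actually constructed.

```python
from abc import ABC, abstractmethod
from pathlib import Path


class ImageStorageBase(ABC):
    """Hypothetical service interface."""

    @abstractmethod
    def save(self, image_name: str, data: bytes) -> None: ...

    @abstractmethod
    def get(self, image_name: str) -> bytes: ...


class DiskImageStorage(ImageStorageBase):
    """Lightweight default implementation provided in the same module."""

    def __init__(self, output_dir: str) -> None:
        self._dir = Path(output_dir)
        self._dir.mkdir(parents=True, exist_ok=True)

    def save(self, image_name: str, data: bytes) -> None:
        (self._dir / image_name).write_bytes(data)

    def get(self, image_name: str) -> bytes:
        return (self._dir / image_name).read_bytes()


class S3ImageStorage(ImageStorageBase):
    """Cloud-backed alternative; boto3 is imported only if this class is used."""

    def __init__(self, bucket: str) -> None:
        import boto3  # deferred: importing this module stays cheap if S3 is never used

        self._s3 = boto3.client("s3")
        self._bucket = bucket

    def save(self, image_name: str, data: bytes) -> None:
        self._s3.put_object(Bucket=self._bucket, Key=image_name, Body=data)

    def get(self, image_name: str) -> bytes:
        return self._s3.get_object(Bucket=self._bucket, Key=image_name)["Body"].read()
```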
## AI Core

-The AI Core is represented by the rest of the code base (i.e. the code outside of `/ldm/invoke/app/`).
+The AI Core is represented by the rest of the code base (i.e. the code outside of `/invokeai/app/`).
@@ -287,8 +287,8 @@ new Invocation ready to be used.

Once you've created a Node, the next step is to share it with the community! The
best way to do this is to submit a Pull Request to add the Node to the
-[Community Nodes](nodes/communityNodes) list. If you're not sure how to do that,
-take a look a at our [contributing nodes overview](contributingNodes).
+[Community Nodes](../nodes/communityNodes.md) list. If you're not sure how to do that,
+take a look at our [contributing nodes overview](../nodes/contributingNodes.md).

## Advanced
@@ -9,20 +9,20 @@ model. These are the:

  configuration information. Among other things, the record service
  tracks the type of the model, its provenance, and where it can be
  found on disk.

* _ModelInstallServiceBase_ A service for installing models to
  disk. It uses `DownloadQueueServiceBase` to download models and
  their metadata, and `ModelRecordServiceBase` to store that
  information. It is also responsible for managing the InvokeAI
  `models` directory and its contents.

* _DownloadQueueServiceBase_
  A multithreaded downloader responsible
  for downloading models from a remote source to disk. The download
  queue has special methods for downloading repo_id folders from
  Hugging Face, as well as discriminating among model versions in
  Civitai, but can be used for arbitrary content.

* _ModelLoadServiceBase_
  Responsible for loading a model from disk
  into RAM and VRAM and getting it ready for inference.
@@ -207,9 +207,9 @@ for use in the InvokeAI web server. Its signature is:

```
def open(
    cls,
    config: InvokeAIAppConfig,
    conn: Optional[sqlite3.Connection] = None,
    lock: Optional[threading.Lock] = None
) -> Union[ModelRecordServiceSQL, ModelRecordServiceFile]:
```
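A hedged sketch of calling it, assuming `ModelRecordServiceBase` is the class exposing this classmethod and that `config` is the running app's `InvokeAIAppConfig`; the database path and the choice to share a connection and lock with the web server are illustrative only.

```python
import sqlite3
import threading

# `config` and `ModelRecordServiceBase` are assumed to come from the InvokeAI app;
# they are not defined here.
conn = sqlite3.connect("databases/invokeai.db", check_same_thread=False)
lock = threading.Lock()

# Returns a SQL-backed or file-backed record store, presumably chosen from the configuration.
record_store = ModelRecordServiceBase.open(config, conn=conn, lock=lock)
```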
@@ -363,7 +363,7 @@ functionality:

* Registering a model config record for a model already located on the
  local filesystem, without moving it or changing its path.

* Installing a model already located on the local filesystem, by
  moving it into the InvokeAI root directory under the
  `models` folder (or wherever config parameter `models_dir`

@@ -371,21 +371,21 @@ functionality:

* Probing of models to determine their type, base type and other key
  information.

* Interface with the InvokeAI event bus to provide status updates on
  the download, installation and registration process.

* Downloading a model from an arbitrary URL and installing it in
  `models_dir`.

* Special handling for HuggingFace repo_ids to recursively download
  the contents of the repository, paying attention to alternative
  variants such as fp16.

* Saving tags and other metadata about the model into the invokeai database
  when fetching from a repo that provides that type of information
  (currently only HuggingFace).
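The difference between the first two bullets (registering in place vs. installing by moving) might look like the sketch below. `install_model()` and `wait_for_installs()` appear in the usage example later in this document; `register_path()` and `install_path()` are assumed method names and may differ in a given release.

```python
# `installer` is assumed to be a ModelInstallServiceBase implementation obtained
# from the running application; `source` is a model source object such as the
# URLModelSource shown in the example further below.

# Register only: the config record points at the file where it already lives.
key_registered = installer.register_path("/data/checkpoints/my-model.safetensors")

# Install: the file is moved under the configured `models_dir`, then registered.
key_installed = installer.install_path("/data/checkpoints/other-model.safetensors")

# Download from a remote source, then install and register it.
job = installer.install_model(source)
installer.wait_for_installs(timeout=120)
```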
### Initializing the installer

A default installer is created at InvokeAI api startup time and stored

@@ -461,7 +461,7 @@ revision.

`config` is an optional dict of values that will override the
autoprobed values for model type, base, scheduler prediction type, and
so forth. See [Model configuration and
-probing](#Model-configuration-and-probing) for details.
+probing](#model-configuration-and-probing) for details.

`access_token` is an optional access token for accessing resources
that need authentication.
@@ -494,7 +494,7 @@ source8 = URLModelSource(url='https://civitai.com/api/download/models/63006', ac

for source in [source1, source2, source3, source4, source5, source6, source7]:
    install_job = installer.install_model(source)

source2job = installer.wait_for_installs(timeout=120)
for source in sources:
    job = source2job[source]

@@ -504,7 +504,7 @@ for source in sources:

        print(f"{source} installed as {model_key}")
    elif job.errored:
        print(f"{source}: {job.error_type}.\nStack trace:\n{job.error}")

```

As shown here, the `import_model()` method accepts a variety of
@@ -1,6 +1,6 @@

# InvokeAI Backend Tests

-We use `pytest` to run the backend python tests. (See [pyproject.toml](/pyproject.toml) for the default `pytest` options.)
+We use `pytest` to run the backend python tests. (See [pyproject.toml](https://github.com/invoke-ai/InvokeAI/blob/main/pyproject.toml) for the default `pytest` options.)

## Fast vs. Slow

All tests are categorized as either 'fast' (no test annotation) or 'slow' (annotated with the `@pytest.mark.slow` decorator).
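For illustration, marking a test as slow looks like this (a minimal sketch; the test names are made up, and the `slow` marker is assumed to be registered via the project's `pyproject.toml`):

```python
import pytest


@pytest.mark.slow
def test_generates_an_image_end_to_end():
    # Annotated tests are excluded from the default 'fast' selection.
    ...


def test_parses_a_config_file():
    # No annotation, so this counts as a 'fast' test.
    assert True
```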
@@ -33,7 +33,7 @@ pytest tests -m ""

## Test Organization

-All backend tests are in the [`tests/`](/tests/) directory. This directory mirrors the organization of the `invokeai/` directory. For example, tests for `invokeai/model_management/model_manager.py` would be found in `tests/model_management/test_model_manager.py`.
+All backend tests are in the [`tests/`](https://github.com/invoke-ai/InvokeAI/tree/main/tests) directory. This directory mirrors the organization of the `invokeai/` directory. For example, tests for `invokeai/model_management/model_manager.py` would be found in `tests/model_management/test_model_manager.py`.

TODO: The above statement is aspirational. A re-organization of legacy tests is required to make it true.
@@ -2,7 +2,7 @@

## **What do I need to know to help?**

If you are looking to help with a code contribution, InvokeAI uses several different technologies under the hood: Python (Pydantic, FastAPI, diffusers) and Typescript (React, Redux Toolkit, ChakraUI, Mantine, Konva). Familiarity with StableDiffusion and image generation concepts is helpful, but not essential.

## **Get Started**

@@ -12,7 +12,7 @@ To get started, take a look at our [new contributors checklist](newContributorCh

Once you're set up, for more information, you can review the documentation specific to your area of interest:

* #### [InvokeAI Architecture](../ARCHITECTURE.md)
-* #### [Frontend Documentation](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web)
+* #### [Frontend Documentation](../frontend/index.md)
* #### [Node Documentation](../INVOCATIONS.md)
* #### [Local Development](../LOCAL_DEVELOPMENT.md)
@@ -20,15 +20,15 @@ Once you're setup, for more information, you can review the documentation specif

If you don't feel ready to make a code contribution yet, no problem! You can also help out in other ways, such as [documentation](documentation.md), [translation](translation.md) or helping support other users and triage issues as they're reported in GitHub.

There are two paths to making a development contribution:

1. Choosing an open issue to address. Open issues can be found in the [Issues](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen) section of the InvokeAI repository. These are tagged by the issue type (bug, enhancement, etc.) along with the “good first issues” tag denoting if they are suitable for first time contributors.
1. Additional items can be found on our [roadmap](https://github.com/orgs/invoke-ai/projects/7). The roadmap is organized in terms of priority, and contains features of varying size and complexity. If there is an inflight item you’d like to help with, reach out to the contributor assigned to the item to see how you can help.
2. Opening a new issue or feature to add. **Please make sure you have searched through existing issues before creating new ones.**

*Regardless of what you choose, please post in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord before you start development in order to confirm that the issue or feature is aligned with the current direction of the project. We value our contributors' time and effort and want to ensure that no one’s time is being misspent.*

## Best Practices:

* Keep your pull requests small. Smaller pull requests are more likely to be accepted and merged
* Comments! Commenting your code helps reviewers easily understand your contribution
* Use Python and Typescript’s typing systems, and consider using an editor with [LSP](https://microsoft.github.io/language-server-protocol/) support to streamline development

@@ -38,7 +38,7 @@ There are two paths to making a development contribution:

If you need help, you can ask questions in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord.

For frontend related work, **@psychedelicious** is the best person to reach out to.

For backend related work, please reach out to **@blessedcoolant**, **@lstein**, **@StAlKeR7779** or **@psychedelicious**.
@@ -22,15 +22,15 @@ Before starting these steps, ensure you have your local environment [configured

2. Fork the [InvokeAI](https://github.com/invoke-ai/InvokeAI) repository to your GitHub profile. This means that you will have a copy of the repository under **your-GitHub-username/InvokeAI**.
3. Clone the repository to your local machine using:

    ```bash
    git clone https://github.com/your-GitHub-username/InvokeAI.git
    ```

    If you're unfamiliar with using Git through the commandline, [GitHub Desktop](https://desktop.github.com) is an easy-to-use alternative with a UI. You can do all the same steps listed here, but through the interface.

4. Create a new branch for your fix using:

    ```bash
    git checkout -b branch-name-here
    ```

5. Make the appropriate changes for the issue you are trying to address or the feature that you want to add.
6. Add the file contents of the changed files to the "snapshot" git uses to manage the state of the project, also known as the index:
@@ -27,9 +27,9 @@ If you just want to use Invoke, you should use the [installer][installer link].

5. Activate the venv (you'll need to do this every time you want to run the app):

    ```sh
    source .venv/bin/activate
    ```

6. Install the repo as an [editable install][editable install link]:

@@ -37,7 +37,7 @@ If you just want to use Invoke, you should use the [installer][installer link].
    pip install -e ".[dev,test,xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
    ```

-    Refer to the [manual installation][manual install link]] instructions for more determining the correct install options. `xformers` is optional, but `dev` and `test` are not.
+    Refer to the [manual installation][manual install link] instructions for help determining the correct install options. `xformers` is optional, but `dev` and `test` are not.

7. Install the frontend dev toolchain:
@@ -34,11 +34,11 @@ Please reach out to @hipsterusername on [Discord](https://discord.gg/ZmtBAhwWhy)

## Contributors

-This project is a combined effort of dedicated people from across the world. [Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for their time, hard work and effort.
+This project is a combined effort of dedicated people from across the world. [Check out the list of all these amazing people](contributors.md). We thank them for their time, hard work and effort.

## Code of Conduct

-The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](https://github.com/invoke-ai/InvokeAI/blob/main/docs/CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
+The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](../CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.

By making a contribution to this project, you certify that:
@@ -378,6 +378,9 @@ class DefaultSessionProcessor(SessionProcessorBase):
        self._poll_now()

    async def _on_queue_item_status_changed(self, event: FastAPIEvent[QueueItemStatusChangedEvent]) -> None:
+        # Make sure the cancel event is for the currently processing queue item
+        if self._queue_item and self._queue_item.item_id != event[1].item_id:
+            return
        if self._queue_item and event[1].status in ["completed", "failed", "canceled"]:
            # When the queue item is canceled via HTTP, the queue item status is set to `"canceled"` and this event is
            # emitted. We need to respond to this event and stop graph execution. This is done by setting the cancel
@@ -32,4 +32,4 @@ def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor) -> tuple[Tensor, Tenso
    xk_ = xk.view(*xk.shape[:-1], -1, 1, 2)
    xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
    xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
-    return xq_out.view(*xq.shape), xk_out.view(*xk.shape)
+    return xq_out.view(*xq.shape).type_as(xq), xk_out.view(*xk.shape).type_as(xk)
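The added `.type_as(...)` calls matter when `freqs_cis` is kept in a different dtype than the activations: PyTorch's type promotion makes the rotated output follow the wider dtype, so without the cast a bfloat16 input silently comes back as float32. A small standalone check (shapes chosen arbitrarily for illustration):

```python
import torch

xq = torch.randn(1, 8, 16, dtype=torch.bfloat16)
freqs_cis = torch.randn(1, 8, 8, 2, 2, dtype=torch.float32)

xq_ = xq.view(*xq.shape[:-1], -1, 1, 2)
xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]

print(xq_out.dtype)               # torch.float32 (promoted by the float32 freqs_cis)
print(xq_out.type_as(xq).dtype)   # torch.bfloat16 (restored to match the input)
```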
@@ -52,16 +52,15 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = True) -> Dict[str,
        except Exception:
            # TODO: create issue for support "meta"?
            checkpoint = safetensors.torch.load_file(path, device="cpu")
-    elif str(path).endswith(".gguf"):
-        # The GGUF reader used here uses numpy memmap, so these tensors are not loaded into memory during this function
-        checkpoint = gguf_sd_loader(Path(path), compute_dtype=torch.float32)
    else:
        if scan:
            scan_result = scan_file_path(path)
            if scan_result.infected_files != 0 or scan_result.scan_err:
                raise Exception(f'The model file "{path}" is potentially infected by malware. Aborting import.')
+        if str(path).endswith(".gguf"):
+            # The GGUF reader used here uses numpy memmap, so these tensors are not loaded into memory during this function
+            checkpoint = gguf_sd_loader(Path(path), compute_dtype=torch.float32)
+        else:
+            checkpoint = torch.load(path, map_location=torch.device("meta"))
-        checkpoint = torch.load(path, map_location=torch.device("meta"))
    return checkpoint
@@ -1,3 +1,3 @@

# Invoke UI

-<https://invoke-ai.github.io/InvokeAI/contributing/frontend/OVERVIEW/>
+<https://invoke-ai.github.io/InvokeAI/contributing/frontend/>
@@ -642,12 +642,6 @@
    "remixImage": "Remix des Bilds erstellen",
    "imageActions": "Weitere Bildaktionen",
    "invoke": {
-      "layer": {
-        "t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, Bbox-Breite ist {{width}}",
-        "t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, Skalierte Bbox-Breite ist {{width}}",
-        "t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, Skalierte Bbox-Höhe ist {{height}}",
-        "t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, Bbox-Höhe ist {{height}}"
-      },
      "fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Skalierte Bbox-Breite ist {{width}}",
      "fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Skalierte Bbox-Höhe ist {{height}}",
      "fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Bbox-Breite ist {{width}}",
@@ -2133,8 +2133,8 @@
    "whatsNew": {
      "whatsNewInInvoke": "What's New in Invoke",
      "items": [
-        "<StrongComponent>Workflows</StrongComponent>: Run a workflow for a collection of images using the new <StrongComponent>Image Batch</StrongComponent> node.",
-        "<StrongComponent>FLUX</StrongComponent>: Support for XLabs IP Adapter v2."
+        "<StrongComponent>FLUX Regional Guidance (beta)</StrongComponent>: Our beta release of FLUX Regional Guidance is live for regional prompt control.",
+        "<StrongComponent>Various UX Improvements</StrongComponent>: A number of small UX and Quality of Life improvements throughout the app."
      ],
      "readReleaseNotes": "Read Release Notes",
      "watchRecentReleaseVideos": "Watch Recent Release Videos",
@@ -317,18 +317,6 @@
    "info": "Info",
    "showOptionsPanel": "Afficher le panneau latéral (O ou T)",
    "invoke": {
-      "layer": {
-        "rgNoPromptsOrIPAdapters": "aucun prompts ou IP Adapters",
-        "t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, la largeur de la bounding box mise à l'échelle est {{width}}",
-        "t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, la hauteur de la bounding box mise à l'échelle est {{height}}",
-        "ipAdapterNoModelSelected": "aucun IP adapter sélectionné",
-        "ipAdapterNoImageSelected": "aucune image d'IP adapter sélectionnée",
-        "controlAdapterIncompatibleBaseModel": "modèle de base de Control Adapter incompatible",
-        "t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, la hauteur de la bounding box est {{height}}",
-        "t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, la largeur de la bounding box est {{width}}",
-        "ipAdapterIncompatibleBaseModel": "modèle de base d'IP adapter incompatible",
-        "controlAdapterNoModelSelected": "aucun modèle de Control Adapter sélectionné"
-      },
      "noPrompts": "Aucun prompts généré",
      "missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} entrée manquante",
      "missingFieldTemplate": "Modèle de champ manquant",
@@ -663,25 +663,8 @@
    "addingImagesTo": "Aggiungi immagini a",
    "systemDisconnected": "Sistema disconnesso",
    "missingNodeTemplate": "Modello di nodo mancante",
-    "missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} ingresso mancante",
+    "missingInputForField": "{{nodeLabel}} -> {{fieldLabel}}: ingresso mancante",
    "missingFieldTemplate": "Modello di campo mancante",
-    "layer": {
-      "controlAdapterNoModelSelected": "Nessun modello di adattatore di controllo selezionato",
-      "controlAdapterIncompatibleBaseModel": "Il modello base dell'adattatore di controllo non è compatibile",
-      "ipAdapterNoModelSelected": "Nessun adattatore IP selezionato",
-      "ipAdapterIncompatibleBaseModel": "Il modello base dell'adattatore IP non è compatibile",
-      "ipAdapterNoImageSelected": "Nessuna immagine dell'adattatore IP selezionata",
-      "rgNoPromptsOrIPAdapters": "Nessun prompt o adattatore IP",
-      "t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, larghezza riquadro è {{width}}",
-      "t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, altezza riquadro è {{height}}",
-      "t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, larghezza del riquadro scalato {{width}}",
-      "t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, altezza del riquadro scalato {{height}}",
-      "rgNegativePromptNotSupported": "prompt negativo non supportato per il modello base selezionato",
-      "rgAutoNegativeNotSupported": "auto-negativo non supportato per il modello base selezionato",
-      "emptyLayer": "livello vuoto",
-      "unsupportedModel": "livello non supportato per il modello base selezionato",
-      "rgReferenceImagesNotSupported": "immagini di riferimento regionali non supportate per il modello base selezionato"
-    },
    "fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), altezza riquadro è {{height}}",
    "fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), larghezza riquadro è {{width}}",
    "fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), larghezza del riquadro scalato è {{width}}",
@@ -689,10 +672,10 @@
    "noT5EncoderModelSelected": "Nessun modello di encoder T5 selezionato per la generazione con FLUX",
    "noCLIPEmbedModelSelected": "Nessun modello CLIP Embed selezionato per la generazione con FLUX",
    "noFLUXVAEModelSelected": "Nessun modello VAE selezionato per la generazione con FLUX",
-    "canvasIsTransforming": "La tela sta trasformando",
-    "canvasIsRasterizing": "La tela sta rasterizzando",
-    "canvasIsCompositing": "La tela è in fase di composizione",
-    "canvasIsFiltering": "La tela sta filtrando",
+    "canvasIsTransforming": "La tela è occupata (sta trasformando)",
+    "canvasIsRasterizing": "La tela è occupata (sta rasterizzando)",
+    "canvasIsCompositing": "La tela è occupata (in composizione)",
+    "canvasIsFiltering": "La tela è occupata (sta filtrando)",
    "collectionTooManyItems": "{{nodeLabel}} -> {{fieldLabel}}: troppi elementi, massimo {{maxItems}}",
    "canvasIsSelectingObject": "La tela è occupata (selezione dell'oggetto)",
    "collectionTooFewItems": "{{nodeLabel}} -> {{fieldLabel}}: troppi pochi elementi, minimo {{minItems}}",
@@ -1207,8 +1190,8 @@
    "controlNetBeginEnd": {
      "heading": "Percentuale passi Inizio / Fine",
      "paragraphs": [
        "La parte del processo di rimozione del rumore in cui verrà applicato l'adattatore di controllo.",
        "In genere, gli adattatori di controllo applicati all'inizio del processo guidano la composizione, mentre quelli applicati alla fine guidano i dettagli.",
        "Questa impostazione determina quale parte del processo di rimozione del rumore (generazione) incorpora la guida da questo livello.",
        "• Passo iniziale (%): specifica quando iniziare ad applicare la guida da questo livello durante il processo di generazione.",
        "• Passo finale (%): specifica quando interrompere l'applicazione della guida di questo livello e ripristinare la guida generale dal modello e altre impostazioni."
      ]
    },
@@ -1492,9 +1475,9 @@
      ]
    },
    "ipAdapterMethod": {
-      "heading": "Metodo",
+      "heading": "Modalità",
      "paragraphs": [
-        "Metodo con cui applicare l'adattatore IP corrente."
+        "La modalità definisce il modo in cui l'immagine di riferimento guiderà il processo di generazione."
      ]
    },
    "scale": {
@@ -1816,7 +1799,7 @@
      "full": "Stile e Composizione",
      "style": "Solo Stile",
      "composition": "Solo Composizione",
-      "ipAdapterMethod": "Metodo Adattatore IP",
+      "ipAdapterMethod": "Modalità",
      "fullDesc": "Applica lo stile visivo (colori, texture) e la composizione (disposizione, struttura).",
      "styleDesc": "Applica lo stile visivo (colori, texture) senza considerare la disposizione.",
      "compositionDesc": "Replica disposizione e struttura ignorando lo stile di riferimento."
@@ -2071,7 +2054,24 @@
    "asControlLayer": "Come $t(controlLayers.controlLayer)",
    "asControlLayerResize": "Come $t(controlLayers.controlLayer) (Ridimensiona)",
    "newSession": "Nuova sessione",
-    "resetCanvasLayers": "Ripristina livelli Tela"
+    "resetCanvasLayers": "Ripristina livelli Tela",
+    "referenceImageRegional": "Immagine di riferimento (regionale)",
+    "referenceImageGlobal": "Immagine di riferimento (globale)",
+    "warnings": {
+      "controlAdapterNoModelSelected": "nessun modello selezionato per il livello di controllo",
+      "controlAdapterNoControl": "nessun controllo selezionato/disegnato",
+      "ipAdapterNoModelSelected": "nessun modello di immagine di riferimento selezionato",
+      "rgNoPromptsOrIPAdapters": "nessun prompt testuale o immagini di riferimento",
+      "rgReferenceImagesNotSupported": "Immagini di riferimento regionali non supportate per il modello base selezionato",
+      "rgNoRegion": "nessuna regione disegnata",
+      "problemsFound": "Problemi riscontrati",
+      "unsupportedModel": "livello non supportato per il modello base selezionato",
+      "controlAdapterIncompatibleBaseModel": "modello di base del livello di controllo incompatibile",
+      "rgNegativePromptNotSupported": "Prompt negativo non supportato per il modello base selezionato",
+      "ipAdapterIncompatibleBaseModel": "modello base dell'immagine di riferimento incompatibile",
+      "ipAdapterNoImageSelected": "nessuna immagine di riferimento selezionata",
+      "rgAutoNegativeNotSupported": "Auto-Negativo non supportato per il modello base selezionato"
+    }
  },
  "ui": {
    "tabs": {
@@ -2171,8 +2171,8 @@
    "watchRecentReleaseVideos": "Guarda i video su questa versione",
    "watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
    "items": [
-      "<StrongComponent>Flussi di lavoro</StrongComponent>: esegui un flusso di lavoro per una raccolta di immagini utilizzando il nuovo nodo <StrongComponent>Lotto di immagini</StrongComponent>.",
-      "<StrongComponent>Tela</StrongComponent>: elaborazione semplificata del livello di controllo e impostazioni di controllo predefinite migliorate."
+      "<StrongComponent>FLUX Regional Guidance (beta)</StrongComponent>: la nostra versione beta di FLUX Regional Guidance è attiva per il controllo dei prompt regionali.",
+      "<StrongComponent>Vari miglioramenti dell'esperienza utente</StrongComponent>: numerosi piccoli miglioramenti dell'esperienza utente e della qualità della vita in tutta l'app."
    ]
  },
  "system": {
@@ -230,15 +230,7 @@
      "systemDisconnected": "Systeem is niet verbonden",
      "missingNodeTemplate": "Knooppuntsjabloon ontbreekt",
      "missingFieldTemplate": "Veldsjabloon ontbreekt",
-      "addingImagesTo": "Bezig met toevoegen van afbeeldingen aan",
-      "layer": {
-        "controlAdapterNoModelSelected": "geen controle-adaptermodel geselecteerd",
-        "controlAdapterIncompatibleBaseModel": "niet-compatibele basismodel voor controle-adapter",
-        "ipAdapterIncompatibleBaseModel": "niet-compatibele basismodel voor IP-adapter",
-        "ipAdapterNoImageSelected": "geen afbeelding voor IP-adapter geselecteerd",
-        "rgNoPromptsOrIPAdapters": "geen tekstprompts of IP-adapters",
-        "ipAdapterNoModelSelected": "geen IP-adapter geselecteerd"
-      }
+      "addingImagesTo": "Bezig met toevoegen van afbeeldingen aan"
    },
    "patchmatchDownScaleSize": "Verklein",
    "useCpuNoise": "Gebruik CPU-ruis",
@@ -648,18 +648,6 @@
      "missingFieldTemplate": "Отсутствует шаблон поля",
      "addingImagesTo": "Добавление изображений в",
      "invoke": "Создать",
-      "layer": {
-        "ipAdapterNoModelSelected": "IP адаптер не выбран",
-        "controlAdapterNoModelSelected": "не выбрана модель адаптера контроля",
-        "controlAdapterIncompatibleBaseModel": "несовместимая базовая модель адаптера контроля",
-        "rgNoPromptsOrIPAdapters": "нет текстовых запросов или IP-адаптеров",
-        "ipAdapterIncompatibleBaseModel": "несовместимая базовая модель IP-адаптера",
-        "ipAdapterNoImageSelected": "изображение IP-адаптера не выбрано",
-        "t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, масштабированная ширина рамки {{width}}",
-        "t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, высота рамки {{height}}",
-        "t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, ширина рамки {{width}}",
-        "t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, масштабированная высота рамки {{height}}"
-      },
      "fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), ширина рамки {{width}}",
      "fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), высота рамки {{height}}",
      "fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), масштабированная высота рамки {{height}}",
@@ -1410,23 +1410,6 @@
    "processImage": "Xử Lý Hình Ảnh",
    "useSize": "Dùng Kích Thước",
    "invoke": {
-      "layer": {
-        "t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, tỉ lệ chiều dài hộp giới hạn là {{height}}",
-        "ipAdapterNoModelSelected": "không có IP Adapter được lựa chọn",
-        "ipAdapterNoImageSelected": "không có ảnh IP Adapter được lựa chọn",
-        "t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, chiều dài hộp giới hạn là {{height}}",
-        "t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, tỉ lệ chiều rộng hộp giới hạn là {{width}}",
-        "t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, chiều rộng hộp giới hạn là {{width}}",
-        "rgNoPromptsOrIPAdapters": "không có lệnh chữ hoặc IP Adapter",
-        "controlAdapterIncompatibleBaseModel": "model cơ sở của Control Adapter không tương thích",
-        "ipAdapterIncompatibleBaseModel": "dạng model cơ sở của IP Adapter không tương thích",
-        "controlAdapterNoModelSelected": "không có model Control Adapter được chọn",
-        "emptyLayer": "layer trống",
-        "rgAutoNegativeNotSupported": "trình tự động đảo chiều không được hỗ trợ cho model cơ sở đang dùng",
-        "rgNegativePromptNotSupported": "lệnh tiêu cực không được hỗ trợ cho model cơ sở đang dùng",
-        "unsupportedModel": "layer không được hỗ trợ cho model cơ sở đang dùng",
-        "rgReferenceImagesNotSupported": "ảnh mẫu khu vực không được hỗ trợ cho model cơ sở đang dùng"
-      },
      "fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), chiều rộng hộp giới hạn là {{width}}",
      "noModelSelected": "Không có model được lựa chọn",
      "fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), tỉ lệ chiều dài hộp giới hạn là {{height}}",
@@ -1931,7 +1914,24 @@
    "asControlLayer": "Như $t(controlLayers.controlLayer)",
    "asControlLayerResize": "Như $t(controlLayers.controlLayer) (Thay Đổi Kích Thước)",
    "newSession": "Phiên Làm Việc Mới",
-    "resetGenerationSettings": "Khởi Động Lại Cài Đặt Tạo Sinh"
+    "resetGenerationSettings": "Khởi Động Lại Cài Đặt Tạo Sinh",
+    "referenceImageRegional": "Ảnh Mẫu (Khu Vực)",
+    "referenceImageGlobal": "Ảnh Mẫu (Toàn Vùng)",
+    "warnings": {
+      "problemsFound": "Phát hiện vấn đề",
+      "unsupportedModel": "layer không được hỗ trợ cho model cơ sở này",
+      "controlAdapterNoModelSelected": "không có model được chọn cho Layer Chỉnh Sửa Được",
+      "controlAdapterNoControl": "chưa chọn/vẽ điều khiển",
+      "ipAdapterIncompatibleBaseModel": "model cơ sở cho Ảnh Mẫu không tương thích",
+      "ipAdapterNoImageSelected": "chưa chọn Ảnh Mẫu",
+      "controlAdapterIncompatibleBaseModel": "model cơ sở cho Layer Chỉnh Sửa Được không tương thích",
+      "ipAdapterNoModelSelected": "không có model được chọn cho Ảnh Mẫu",
+      "rgNoPromptsOrIPAdapters": "không có lệnh hoặc Ảnh Mẫu",
+      "rgNegativePromptNotSupported": "Lệnh Tiêu Cực không được hỗ trợ cho model cơ sở được chọn",
+      "rgReferenceImagesNotSupported": "Ảnh Mẫu Khu Vực không được hỗ trợ cho model cơ sở được chọn",
+      "rgAutoNegativeNotSupported": "Tự Động Đảo Chiều không được hỗ trợ cho model cơ sở được chọn",
+      "rgNoRegion": "không có khu vực được vẽ"
+    }
  },
  "stylePresets": {
    "negativePrompt": "Lệnh Tiêu Cực",
@@ -2156,8 +2156,8 @@
    "watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
    "watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
    "items": [
-      "<StrongComponent>Workflows</StrongComponent>: Chạy một workflow cho nhiều ảnh bằng node <StrongComponent>Ảnh Hàng Loạt</StrongComponent> mới.",
-      "<StrongComponent>FLUX</StrongComponent>: Hỗ trợ cho XLabs IP Adapter v2."
+      "<StrongComponent>Hướng Dẫn Khu Vực FLUX (beta)</StrongComponent>: Bản beta của Hướng Dẫn Khu Vực FLUX của chúng ta đã có mắt tại bảng điều khiển lệnh khu vực.",
+      "<StrongComponent>Nhiều Cải Tiến Ở UX</StrongComponent>: Một số nâng cấp nhỏ ở trải nghiệm và chất lượng người dùng trên toàn bộ ứng dụng."
    ]
  },
  "upsell": {
@@ -661,18 +661,6 @@
      "missingFieldTemplate": "缺失模板",
      "addingImagesTo": "添加图像到",
      "noPrompts": "没有已生成的提示词",
-      "layer": {
-        "ipAdapterNoModelSelected": "未选择IP adapter",
-        "controlAdapterNoModelSelected": "未选择Control Adapter模型",
-        "rgNoPromptsOrIPAdapters": "无文本提示或IP Adapters",
-        "controlAdapterIncompatibleBaseModel": "Control Adapter的基础模型不兼容",
-        "ipAdapterIncompatibleBaseModel": "IP Adapter的基础模型不兼容",
-        "ipAdapterNoImageSelected": "未选择IP Adapter图像",
-        "t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},边界框宽度为 {{width}}",
-        "t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},缩放后的边界框高度为 {{height}}",
-        "t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},边界框高度为 {{height}}",
-        "t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},缩放后的边界框宽度为 {{width}}"
-      },
      "canvasIsFiltering": "画布正在过滤",
      "fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),缩放后的边界框高度为 {{height}}",
      "noCLIPEmbedModelSelected": "未为FLUX生成选择CLIP嵌入模型",
@@ -17,7 +17,9 @@ type AddIPAdaptersArg = {
};

export const addIPAdapters = ({ entities, g, collector, model }: AddIPAdaptersArg): AddIPAdaptersResult => {
-  const validIPAdapters = entities.filter((entity) => getGlobalReferenceImageWarnings(entity, model).length === 0);
+  const validIPAdapters = entities
+    .filter((entity) => entity.isEnabled)
+    .filter((entity) => getGlobalReferenceImageWarnings(entity, model).length === 0);

  const result: AddIPAdaptersResult = {
    addedIPAdapters: 0,
@@ -63,12 +63,9 @@ export const addRegions = async ({
  const isSDXL = model.base === 'sdxl';
  const isFLUX = model.base === 'flux';

-  const validRegions = regions.filter((rg) => {
-    if (!rg.isEnabled) {
-      return false;
-    }
-    return getRegionalGuidanceWarnings(rg, model).length === 0;
-  });
+  const validRegions = regions
+    .filter((entity) => entity.isEnabled)
+    .filter((entity) => getRegionalGuidanceWarnings(entity, model).length === 0);

  const results: AddedRegionResult[] = [];
@@ -31,7 +31,7 @@ const optionsObject: Record<Language, string> = {
  sv: 'Svenska',
  tr: 'Türkçe',
  ua: 'Украї́нська',
-  vi: 'tiếng Việt',
+  vi: 'Tiếng Việt',
  zh_CN: '简体中文',
  zh_Hant: '漢語',
};
@@ -1 +1 @@
-__version__ = "5.4.3rc1"
+__version__ = "5.4.3"