Compare commits

..

484 Commits

Author SHA1 Message Date
psychedelicious
056c56d322 chore: release v4.2.9.dev20240824 2024-08-24 12:36:25 +10:00
psychedelicious
afc6f83d72 fix(ui): lint & fix issues with adding regional ip adapters 2024-08-24 12:32:38 +10:00
psychedelicious
c776ac3af2 feat(ui): add knipignore tag
I'm not ready to delete some things but still want to build the app.
2024-08-24 12:32:00 +10:00
psychedelicious
b7b3683bef feat(ui): duplicate entity 2024-08-24 12:20:35 +10:00
psychedelicious
fb26b6824a feat(ui): autocomplete on getPrefixeId 2024-08-24 12:20:26 +10:00
psychedelicious
63d8ad912f feat(ui): paste canvas gens back on source in generate mode 2024-08-24 11:56:24 +10:00
psychedelicious
bbd7d7fc17 chore(ui): typegen 2024-08-24 11:55:50 +10:00
psychedelicious
6507a78182 feat(nodes): CanvasV2MaskAndCropInvocation can paste generated image back on source
This is needed for `Generate` mode.
2024-08-24 11:55:43 +10:00
psychedelicious
22f46517f4 fix(ui): extraneous entity preview updates 2024-08-24 11:28:05 +10:00
psychedelicious
45596e1f94 fix(ui): newly-added entities are selected 2024-08-24 11:14:58 +10:00
psychedelicious
6de0dbe854 feat(ui): add crosshair to color picker 2024-08-24 10:51:34 +10:00
psychedelicious
011827fa29 fix(ui): color picker ignores alpha 2024-08-24 10:16:27 +10:00
psychedelicious
fc6d244071 fix(ui): calculate renderable entities correctly in tool module 2024-08-24 10:10:21 +10:00
psychedelicious
cd3da886d6 feat(ui): better color picker 2024-08-24 10:10:04 +10:00
psychedelicious
c013c55d92 feat(ui): colored mask preview image 2024-08-24 08:54:20 +10:00
psychedelicious
cd3dd7db0d fix(ui): new rectangles don't trigger rerender 2024-08-23 23:24:16 +10:00
psychedelicious
1fdcce9429 chore: bump version v4.2.9.dev20240823 2024-08-23 20:52:16 +10:00
psychedelicious
181e40926d feat(ui): disable most interaction while filtering 2024-08-23 20:32:49 +10:00
psychedelicious
c62ede5878 fix(ui): filter preview offset 2024-08-23 20:24:40 +10:00
psychedelicious
a2ad5f1a9a feat(ui): tweak layout of staging area toolbar 2024-08-23 19:55:02 +10:00
psychedelicious
ff74a5356f chore(ui): typegen 2024-08-23 19:52:37 +10:00
psychedelicious
92dc30dace tidy(app): clean up app changes for canvas v2 2024-08-23 19:52:04 +10:00
psychedelicious
3af577b210 feat(ui): use singleton for clear q confirm dialog 2024-08-23 19:47:51 +10:00
psychedelicious
d0464330f7 fix(ui): rip out broken recall logic, NO TS ERRORS 2024-08-23 19:47:51 +10:00
psychedelicious
dd3ef4a80f chore(ui): lint 2024-08-23 19:47:51 +10:00
psychedelicious
0ced891944 fix(ui): staging area interaction scopes 2024-08-23 19:47:51 +10:00
psychedelicious
10a5452df9 fix(ui): staging area actions 2024-08-23 19:47:51 +10:00
psychedelicious
cb97969bbc tidy(ui): more cleanup 2024-08-23 19:47:51 +10:00
psychedelicious
71e742e238 fix(ui): upscale tab graph 2024-08-23 19:47:51 +10:00
psychedelicious
fadd20fb8e fix(ui): sdxl graph builder 2024-08-23 19:47:51 +10:00
psychedelicious
01b9ca78e4 fix(ui): select next entity in the list when deleting 2024-08-23 19:47:51 +10:00
psychedelicious
2baf825f34 feat(ui): fix delete layer hotkey 2024-08-23 19:47:51 +10:00
psychedelicious
1fa8048509 tidy(ui): "eye dropper" -> "color picker" 2024-08-23 19:47:51 +10:00
psychedelicious
a000ad75f6 tidy(ui): regional guidance buttons 2024-08-23 19:47:51 +10:00
psychedelicious
aefb2339bb feat(ui): update entity list menu 2024-08-23 19:47:51 +10:00
psychedelicious
a4f8671f86 feat(ui): add log debug button 2024-08-23 19:47:51 +10:00
psychedelicious
73530ba54f chore(ui): lint 2024-08-23 19:47:51 +10:00
psychedelicious
685eb9927d chore(ui): prettier 2024-08-23 19:47:51 +10:00
psychedelicious
ee57302fc3 chore(ui): eslint 2024-08-23 19:47:51 +10:00
psychedelicious
c1fb9cdb93 tidy(ui): remove unused stuff 4 2024-08-23 19:47:36 +10:00
psychedelicious
aa6d441552 tidy(ui): remove unused stuff 3 2024-08-23 19:47:01 +10:00
psychedelicious
25d8d4c2e9 tidy(ui): remove unused pkg @chakra-ui/react-use-size 2024-08-23 19:47:01 +10:00
psychedelicious
427ea6da5c feat(ui): revise graph building for control layers, fix issues w/ invocation complete events 2024-08-23 19:47:01 +10:00
psychedelicious
d9f4266630 feat(ui): use unique id for metadata in Graph class 2024-08-23 19:47:01 +10:00
psychedelicious
96f6e9e683 tidy(ui): remove unused stuff 2 2024-08-23 19:47:01 +10:00
psychedelicious
f10248e3f5 tidy(ui): remove unused stuff 2024-08-23 19:47:01 +10:00
psychedelicious
6a21f5fde1 tidy(ui): reduce use of parseify util 2024-08-23 19:47:01 +10:00
psychedelicious
ff20dd509a feat(ui): refine canvas entity list items & menus 2024-08-23 19:47:01 +10:00
psychedelicious
78a59b5b78 feat(ui): canvas layer preview, revised reactivity for adapters 2024-08-23 19:47:01 +10:00
psychedelicious
46bfbbbc87 feat(ui): add SyncableMap
Can be used with useSyncExternalStore to make a `Map` reactive.
2024-08-23 19:47:01 +10:00
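
A minimal sketch of what `SyncableMap` might look like, assuming it pairs with React's `useSyncExternalStore` as described above; everything beyond the class name (snapshot caching, method names) is an illustrative assumption:

```ts
import { useSyncExternalStore } from 'react';

export class SyncableMap<K, V> extends Map<K, V> {
  private listeners = new Set<() => void>();
  private snapshot: V[] | null = null;

  subscribe = (listener: () => void): (() => void) => {
    this.listeners.add(listener);
    return () => this.listeners.delete(listener);
  };

  // useSyncExternalStore compares snapshots by reference, so cache the array
  // and only rebuild it after a mutation.
  getSnapshot = (): V[] => {
    if (this.snapshot === null) {
      this.snapshot = Array.from(this.values());
    }
    return this.snapshot;
  };

  set(key: K, value: V): this {
    super.set(key, value);
    this.notify();
    return this;
  }

  delete(key: K): boolean {
    const deleted = super.delete(key);
    if (deleted) {
      this.notify();
    }
    return deleted;
  }

  clear(): void {
    super.clear();
    this.notify();
  }

  private notify() {
    // Map's constructor calls set() before class fields initialize; bail out.
    if (!this.listeners) {
      return;
    }
    this.snapshot = null;
    for (const listener of this.listeners) {
      listener();
    }
  }
}

// Usage in a component: re-renders whenever the map mutates.
export const useMapValues = <K, V>(map: SyncableMap<K, V>) =>
  useSyncExternalStore(map.subscribe, map.getSnapshot);
```
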
psychedelicious
a6d73d0773 tidy(ui): removed unused transform methods from canvasmanager 2024-08-23 19:47:01 +10:00
psychedelicious
6578e8bef8 feat(ui): transform tool ux 2024-08-23 19:47:01 +10:00
psychedelicious
0596d25e07 feat(ui): rough out canvas mode 2024-08-23 19:47:01 +10:00
psychedelicious
86e8ce9139 feat(ui): add canvas autosave checkbox 2024-08-23 19:47:01 +10:00
psychedelicious
5aa2957da4 fix(ui): memory leak when getting image DTO
must unsubscribe!
2024-08-23 19:47:01 +10:00
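
A sketch of the likely fix pattern, assuming RTK Query's imperative `initiate()` API (`unwrap()` and `unsubscribe()` are real RTK Query methods; `store`, `imagesApi`, and `ImageDTO` are names assumed here). Each `initiate()` call opens a cache subscription that retains the entry until it is released:

```ts
// `store` is the redux store, `imagesApi` an RTK Query slice with a
// `getImageDTO` endpoint, `ImageDTO` the image record type (names assumed).
const getImageDTOSafe = async (imageName: string): Promise<ImageDTO | null> => {
  const request = store.dispatch(imagesApi.endpoints.getImageDTO.initiate(imageName));
  try {
    return await request.unwrap();
  } catch {
    return null;
  } finally {
    request.unsubscribe(); // must unsubscribe!
  }
};
```
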
psychedelicious
82f0cb2c8c feat(ui): rework settings menu 2024-08-23 19:47:01 +10:00
psychedelicious
fa48145cbc feat(ui): no entities fallback buttons 2024-08-23 19:47:01 +10:00
psychedelicious
6d1edc330d perf(ui): optimize gallery image delete button rendering 2024-08-23 19:47:01 +10:00
psychedelicious
97c0d3f6be feat(ui): remove "solid" background option 2024-08-23 19:47:01 +10:00
psychedelicious
a79a25ad63 tidy(ui): organise files and classes 2024-08-23 19:47:01 +10:00
psychedelicious
6a8ceef404 tidy(ui): abstract compositing logic to module 2024-08-23 19:47:01 +10:00
psychedelicious
3539670d93 fix(ui): fix canvas cache property access 2024-08-23 19:47:01 +10:00
psychedelicious
c54bc32ef6 tidy(ui): clean up CanvasFilter class 2024-08-23 19:47:01 +10:00
psychedelicious
fee293e289 tidy(ui): clean up a few bits and bobs 2024-08-23 19:47:01 +10:00
psychedelicious
747eef9ccc tidy(ui): abstract canvas rendering logic to module 2024-08-23 19:47:01 +10:00
psychedelicious
7d2df399ed tidy(ui): abstract caching logic to module 2024-08-23 19:47:01 +10:00
psychedelicious
68fad5cdcc tidy(ui): abstract worker logic to module 2024-08-23 19:47:01 +10:00
psychedelicious
b4d656c203 tidy(ui): abstract stage logic into module 2024-08-23 19:47:01 +10:00
psychedelicious
3136d89d52 feat(ui): add entity group hiding 2024-08-23 19:47:01 +10:00
psychedelicious
27e829b955 feat(ui): move all caching out of redux
While we lose the benefit of the caches persisting across reloads, this is a much simpler way to handle things. If we need a persistent cache, we can explore it in the future.
2024-08-23 19:47:01 +10:00
psychedelicious
e03e870d5b feat(ui): revised rasterization caching
- use `stable-hash` to generate stable, non-crypto hashes for cache entries, instead of using deep object comparisons
- use an object to store image name caches
2024-08-23 19:47:01 +10:00
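
A sketch of the described caching scheme; `stable-hash` is the package named in the commit, while the cache shape is an assumption:

```ts
import stableHash from 'stable-hash';

// Maps a content hash of an entity's state to its rasterized image name.
const imageNameCache: Record<string, string> = {};

export const getCachedImageName = (entityState: object): string | undefined =>
  // stableHash returns the same string for deeply-equal objects regardless of
  // key insertion order, so no deep comparison is needed.
  imageNameCache[stableHash(entityState)];

export const cacheImageName = (entityState: object, imageName: string): void => {
  imageNameCache[stableHash(entityState)] = imageName;
};
```
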
psychedelicious
9465ff450b feat(ui): revise filter implementation 2024-08-23 19:47:01 +10:00
psychedelicious
92906a9575 fix(ui): add button to delete inpaint mask 2024-08-23 19:47:01 +10:00
psychedelicious
77f206abe4 feat(ui): add contexts/hooks to access entity adapters directly 2024-08-23 19:47:01 +10:00
psychedelicious
44a3f61580 feat(ui): add CanvasManagerProviderGate
This context waits to render its children until the canvas manager is available. Then its children have access to the manager directly via a hook.
2024-08-23 19:47:01 +10:00
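
A sketch of how such a gate can work, assuming the manager is published via a nanostores atom (a `$canvasManager` atom is mentioned in the next commit); the type stand-in and hook name are assumptions:

```tsx
import { useStore } from '@nanostores/react';
import { atom } from 'nanostores';
import { createContext, useContext, type PropsWithChildren } from 'react';

type CanvasManager = Record<string, unknown>; // stand-in for the app's manager class

export const $canvasManager = atom<CanvasManager | null>(null);

const CanvasManagerContext = createContext<CanvasManager | null>(null);

export const CanvasManagerProviderGate = ({ children }: PropsWithChildren) => {
  const canvasManager = useStore($canvasManager);
  if (!canvasManager) {
    return null; // wait until the manager exists
  }
  return <CanvasManagerContext.Provider value={canvasManager}>{children}</CanvasManagerContext.Provider>;
};

// Children of the gate can assume a non-null manager.
export const useCanvasManager = (): CanvasManager => {
  const canvasManager = useContext(CanvasManagerContext);
  if (!canvasManager) {
    throw new Error('useCanvasManager must be used within CanvasManagerProviderGate');
  }
  return canvasManager;
};
```
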
psychedelicious
0a2afed08b feat(ui) do not set $canvasManager until ready 2024-08-23 19:47:01 +10:00
psychedelicious
9b3b961105 fix(ui): inpaint mask naming 2024-08-23 19:47:01 +10:00
psychedelicious
9b1828e1aa feat(ui): efficient canvas compositing
Also solves an issue where layers were exported at different opacities than what is visible
2024-08-23 19:47:01 +10:00
psychedelicious
5101873f49 feat(ui): allow multiple inpaint masks
This is easier than making it a nullable singleton
2024-08-23 19:47:01 +10:00
psychedelicious
c612f18114 fix(ui): missing rasterization cache invalidations 2024-08-23 19:47:01 +10:00
psychedelicious
7e400d876f feat(ui): iterate on filter UI, flow 2024-08-23 19:47:01 +10:00
psychedelicious
677dddcfc9 fix(ui): rehydration data loss 2024-08-23 19:47:01 +10:00
psychedelicious
0792b9175e feat(ui): sort log namespaces 2024-08-23 19:47:01 +10:00
psychedelicious
e4829f80af fix(ui): do not merge arrays by index during rehydration 2024-08-23 19:47:01 +10:00
psychedelicious
bb760f3eb4 fix(ui): clone parsed data during state rehydration
Without this, the objects and arrays in `parsed` could be mutated, and the log statement would show the mutated data.
2024-08-23 19:47:01 +10:00
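
The idea in miniature (logger shape assumed):

```ts
const logRehydrated = (log: { debug: (data: unknown, msg: string) => void }, parsed: unknown) => {
  // structuredClone freezes a snapshot at log time; logging `parsed` directly
  // would show whatever the object looks like when it is later inspected.
  log.debug({ state: structuredClone(parsed) }, 'Rehydrated state');
};
```
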
psychedelicious
388c65287b fix(ui): fix logger filter
It was accidentally replacing the filter instead of appending to it.
2024-08-23 19:47:01 +10:00
psychedelicious
12cd41e05c fix(ui): race condition queue status
Sequence of events causing the race condition:
- Enqueue batch
- Invalidate `SessionQueueStatus` tag
- Request updated queue status via HTTP - batch still processing at this point
- Batch completes
- Event emitted saying so
- Optimistically update the queue status cache, it is correct
- HTTP request makes it back and overwrites the optimistic update, indicating the batch is still in progress

Fixed by not invalidating the cache.
2024-08-23 19:47:01 +10:00
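
A sketch of the fix, assuming RTK Query (`updateQueryData` is real RTK Query API; the socket, the `queueApi` slice, the `getQueueStatus` endpoint, and the event shape are assumptions). The event patches the cache directly and the tag is simply never invalidated, so no stale HTTP refetch can overwrite it:

```ts
socket.on('queue_item_status_changed', (event) => {
  // Patch the cached query data in place (Immer draft); do NOT invalidate the
  // tag, which would kick off an HTTP refetch that can race with this event.
  store.dispatch(
    queueApi.util.updateQueryData('getQueueStatus', undefined, (draft) => {
      draft.queue.pending = event.queue.pending;
      draft.queue.in_progress = event.queue.in_progress;
      draft.queue.completed = event.queue.completed;
    })
  );
});
```
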
psychedelicious
7765c03949 fix(ui): handle opacity for masks 2024-08-23 19:47:01 +10:00
psychedelicious
3daa80c57f feat(ui): default background to checkerboard 2024-08-23 19:47:01 +10:00
psychedelicious
5dbbef4ebd feat(ui): clean up logging namespaces, allow skipping namespaces 2024-08-23 19:47:01 +10:00
psychedelicious
db33b3f7b5 chore(ui): bump ui library 2024-08-23 19:47:01 +10:00
psychedelicious
8ffcf2a6be fix(ui): do not allow drawing if layer disabled 2024-08-23 19:47:01 +10:00
psychedelicious
2e7ae6a07e fix(ui): stale state causing race conditions & extraneous renders 2024-08-23 19:47:01 +10:00
psychedelicious
fea1711f0c fix(ui): do not clear buffer when rendering "real" objects 2024-08-23 19:47:01 +10:00
psychedelicious
2a3546db97 tidy(ui): remove "filter" from CanvasImageState 2024-08-23 19:47:01 +10:00
psychedelicious
285c266612 feat(ui): better editable title 2024-08-23 19:47:01 +10:00
psychedelicious
426ad54c53 fix(ui): stroke eraserline 2024-08-23 19:47:01 +10:00
psychedelicious
fc75f7919f feat(ui): restore transparency effect for control layers 2024-08-23 19:47:01 +10:00
psychedelicious
6c6b1aaff6 feat(ui): use text cursor for entity title 2024-08-23 19:47:01 +10:00
psychedelicious
c319d653ac tidy(ui): remove extraneous logging in CanvasStateApi 2024-08-23 19:47:01 +10:00
psychedelicious
d887e474e7 feat(ui): better buffer commit logic 2024-08-23 19:47:01 +10:00
psychedelicious
da7b52d6ba feat(ui): render buffer separately from "real" objects 2024-08-23 19:47:01 +10:00
psychedelicious
b5aa308593 fix(ui): pixelRect should always be integer 2024-08-23 19:47:01 +10:00
psychedelicious
0b7ceb3bb6 fix(ui): only update stage attrs when stage itself is dragged 2024-08-23 19:47:01 +10:00
psychedelicious
3a70cefda2 feat(ui): add line simplification
This fixes some awkward issues where line segments stack up.
2024-08-23 19:47:01 +10:00
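
One common simplification strategy, sketched here as radial-distance filtering (the algorithm actually used may differ): drop points closer than a tolerance to the last kept point, so tiny mouse moves no longer stack up segments. Points are a flat `[x0, y0, x1, y1, ...]` array, as Konva lines use:

```ts
export const simplifyLine = (points: number[], tolerance = 1): number[] => {
  if (points.length <= 4) {
    return points; // a single segment; nothing to simplify
  }
  const out = [points[0], points[1]];
  let lastX = points[0];
  let lastY = points[1];
  for (let i = 2; i < points.length; i += 2) {
    const x = points[i];
    const y = points[i + 1];
    if (Math.hypot(x - lastX, y - lastY) >= tolerance) {
      out.push(x, y);
      lastX = x;
      lastY = y;
    }
  }
  // Always keep the final point so the stroke ends where the user ended it.
  const n = points.length;
  if (out[out.length - 2] !== points[n - 2] || out[out.length - 1] !== points[n - 1]) {
    out.push(points[n - 2], points[n - 1]);
  }
  return out;
};
```
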
psychedelicious
4b609251e1 fix(ui): various things listening when they need not listen 2024-08-23 19:47:01 +10:00
psychedelicious
0839eac0f7 feat(ui): layer opacity via caching 2024-08-23 19:47:01 +10:00
psychedelicious
5f2a7feeee feat(ui): reset view fits all visible objects 2024-08-23 19:47:01 +10:00
psychedelicious
982535eb92 fix(ui): rerenders when changing canvas scale 2024-08-23 19:47:01 +10:00
psychedelicious
0c2b8edc8d fix(ui): do not render rasterized layer unless renderObjects=true 2024-08-23 19:47:01 +10:00
psychedelicious
f78f4ca25f feat(ui): revise app layout strategy, add interaction scopes for hotkeys 2024-08-23 19:47:01 +10:00
psychedelicious
d6b3e6c07d feat(ui): tweak mask patterns 2024-08-23 19:47:01 +10:00
psychedelicious
071ff8e74a fix(ui): dynamic prompts recalcs when presets are loaded 2024-08-23 19:47:01 +10:00
psychedelicious
1ea8aafca1 fix(ui): use style preset prompts correctly 2024-08-23 19:46:05 +10:00
psychedelicious
533dd221f8 fix(ui): discard selected staging image not all other images 2024-08-23 19:46:05 +10:00
psychedelicious
2b325c6683 fix(ui): respect image size in staging preview 2024-08-23 19:46:05 +10:00
psychedelicious
3845b1b3e6 tidy(ui): cleanup after events change 2024-08-23 19:46:05 +10:00
psychedelicious
cea7890a67 feat(ui): move socket event handling out of redux
Download events and invocation status events (including progress images) are very frequent. There's no real need for these to pass through redux. Handling them outside redux is a significant performance win - far fewer store subscription calls, far fewer trips through middleware.

All event handling is moved outside middleware. Cleanup of unused actions and listeners to follow.
2024-08-23 19:46:05 +10:00
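
A sketch of the pattern, assuming a socket.io client and nanostores (the event and field names are assumptions): high-frequency events write to an atom, and only components that actually render the value subscribe to it:

```ts
import { atom } from 'nanostores';
import type { Socket } from 'socket.io-client';

export const $lastProgressImage = atom<string | null>(null);

export const setupSocketHandlers = (socket: Socket) => {
  socket.on('invocation_denoise_progress', (event: { progress_image?: { dataURL: string } }) => {
    // No dispatch, no middleware: just set the atom.
    $lastProgressImage.set(event.progress_image?.dataURL ?? null);
  });
};
```
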
psychedelicious
c38fe8025d fix(ui): rebase conflicts 2024-08-23 19:46:05 +10:00
psychedelicious
f1de95349c fix(ui): update compositing rect when fill changes 2024-08-23 19:46:05 +10:00
psychedelicious
2950775fa7 feat(ui): add canvas background style 2024-08-23 19:46:05 +10:00
psychedelicious
cb293fd7ac feat(ui): mask layers choose own opacity 2024-08-23 19:46:05 +10:00
psychedelicious
43b3fab6be feat(ui): mask fill patterns 2024-08-23 19:46:05 +10:00
psychedelicious
d4b0dbce49 build(ui): add vite types to tsconfig 2024-08-23 19:46:05 +10:00
psychedelicious
137b810669 fix(ui): do not smooth pixel data when using eyeDropper 2024-08-23 19:46:05 +10:00
psychedelicious
c172657324 tidy(ui): tool components & translations 2024-08-23 19:46:05 +10:00
psychedelicious
97c966b04f feat(ui): rough out eyedropper tool
It's a bit slow because we are converting the stage to canvas on every mouse move. The visuals also need improvement, but it works.
2024-08-23 19:46:05 +10:00
psychedelicious
7178fc6253 fix(ui): ip adapters work 2024-08-23 19:46:05 +10:00
psychedelicious
4adb2eabf5 feat(ui): rename layers 2024-08-23 19:46:05 +10:00
psychedelicious
9f2c815e13 feat(ui): revise entity menus 2024-08-23 19:46:05 +10:00
psychedelicious
1435557d1d feat(ui): split control layers from raster layers for UI and internal state, same rendering as raster layers 2024-08-23 19:46:05 +10:00
psychedelicious
96abf687f6 feat(ui): implement cache for image rasterization, rip out some old controladapters code 2024-08-23 19:46:05 +10:00
psychedelicious
636d9a7209 feat(ui, app): use layer as control (wip) 2024-08-23 19:46:05 +10:00
psychedelicious
3b36eb0223 feat(ui): add contextmenu for canvas entities 2024-08-23 19:46:05 +10:00
psychedelicious
388c97bff0 feat(ui): more better logging & naming 2024-08-23 19:46:05 +10:00
psychedelicious
b1cb018695 feat(ui): better logging w/ path 2024-08-23 19:46:05 +10:00
psychedelicious
df78dd7953 feat(ui): always show marks on canvas scale slider 2024-08-23 19:46:05 +10:00
psychedelicious
0dc344a22e fix(ui): do not import button from chakra 2024-08-23 19:46:05 +10:00
psychedelicious
350d7f6f14 fix(ui): scaled bbox preview 2024-08-23 19:46:05 +10:00
psychedelicious
11059ee2d4 feat(ui): tidy up atoms 2024-08-23 19:46:05 +10:00
psychedelicious
c90d3f3bb9 feat(ui): convert all my pubsubs to atoms
it's the same but better
2024-08-23 19:46:05 +10:00
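
The shape of the swap, sketched with an illustrative atom (nanostores' `listen`/`set` are real API; the store name is an assumption):

```ts
import { atom } from 'nanostores';

// before: hand-rolled subscribe/publish plumbing
// after: an atom provides get/set/subscribe/listen for free
export const $isTransforming = atom(false);

const unsubscribe = $isTransforming.listen((isTransforming) => {
  console.log('isTransforming changed:', isTransforming);
});

$isTransforming.set(true); // notifies listeners
unsubscribe();
```
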
psychedelicious
7f6d439fd1 feat(ui): add translation 2024-08-23 19:46:05 +10:00
psychedelicious
783a78f069 fix(ui): give up on thumbnail loading, causes flash during transformer 2024-08-23 19:46:05 +10:00
psychedelicious
0ff031950d fix(ui): depth anything v2 2024-08-23 19:46:05 +10:00
psychedelicious
d7e8f3d756 tidy(ui): remove unused code, comments 2024-08-23 19:46:05 +10:00
psychedelicious
4668ea449b fix(ui): staging area works 2024-08-23 19:46:05 +10:00
psychedelicious
30d318d021 feat(nodes): temp disable canvas output crop 2024-08-23 19:46:05 +10:00
psychedelicious
de96f97e5f fix(ui): max scale 1 when reset view 2024-08-23 19:46:05 +10:00
psychedelicious
57c0a2dfb1 feat(ui): better scale changer component, reset view functionality 2024-08-23 19:46:05 +10:00
psychedelicious
cd4e464bde fix(ui): img2img 2024-08-23 19:46:05 +10:00
psychedelicious
49e48c3eb7 feat(ui): add manual scale controls 2024-08-23 19:46:05 +10:00
psychedelicious
edd3b3bce9 fix(ui): do not await clearBuffer 2024-08-23 19:46:04 +10:00
psychedelicious
f8bfb66108 feat(ui): dnd image into layer 2024-08-23 19:46:04 +10:00
psychedelicious
3b6a76cbf3 fix(ui): do not await commitBuffer 2024-08-23 19:46:04 +10:00
psychedelicious
e0b60e4320 fix(ui): properly destroy entities in manager cleanup 2024-08-23 19:46:04 +10:00
psychedelicious
2159319035 tidy(ui): clearer component names for regional guidance 2024-08-23 19:46:04 +10:00
psychedelicious
b170fc232e tidy(ui): clearer component names for ip adapter 2024-08-23 19:46:04 +10:00
psychedelicious
594da60f2f tidy(ui): clearer component names for inpaint mask 2024-08-23 19:46:04 +10:00
psychedelicious
6a432f6518 tidy(ui): clearer component names for control adapters 2024-08-23 19:46:04 +10:00
psychedelicious
eb8eacfec6 feat(ui): simplify canvas list item headers 2024-08-23 19:46:04 +10:00
psychedelicious
c8d04d42e2 fix(ui): ip adapter list item 2024-08-23 19:46:04 +10:00
psychedelicious
d39c9de81e tidy(ui): clean up unused logic 2024-08-23 19:46:04 +10:00
psychedelicious
a27d39b9ff feat(ui): clean up state, add mutex for image loading, add thumbnail loading 2024-08-23 19:46:04 +10:00
psychedelicious
6b385614f0 chore(ui): add async-mutex dep 2024-08-23 19:46:04 +10:00
psychedelicious
3ae7250ef7 feat(ui): txt2img, img2img, inpaint & outpaint working 2024-08-23 19:46:04 +10:00
psychedelicious
a42d0ce1d2 feat(ui): no padding on transformer outlines 2024-08-23 19:46:04 +10:00
psychedelicious
d9131f7563 feat(ui): restore object count to layer titles 2024-08-23 19:46:04 +10:00
psychedelicious
bdce958f29 tidy(ui): "useIsEntitySelected" -> "useEntityIsSelected" 2024-08-23 19:46:04 +10:00
psychedelicious
3c86f1e979 tidy(ui): move transformer statics into class 2024-08-23 19:46:04 +10:00
psychedelicious
894b8a29b9 tidy(ui): massive cleanup
- create a context for entity identifiers, massively simplifying UI for each entity in the list
- consolidate common redux actions
- remove now-unused code
2024-08-23 19:46:04 +10:00
psychedelicious
8436a44973 perf(ui): do not add duplicate points to lines 2024-08-23 19:46:04 +10:00
psychedelicious
f9f9ec3688 feat(ui): up line tension to 0.3 2024-08-23 19:46:04 +10:00
psychedelicious
5a98d7a1f6 perf(ui): disable stroke, perfect draw on compositing rect 2024-08-23 19:46:04 +10:00
psychedelicious
f9bc96e497 tidy(ui): remove unused code, initial image 2024-08-23 19:46:04 +10:00
psychedelicious
56350ff5dc tidy(ui): remove unused state & actions 2024-08-23 19:46:04 +10:00
psychedelicious
6c1139340c feat(ui): region mask rendering 2024-08-23 19:46:04 +10:00
psychedelicious
641b1a7e6f feat(ui): esc cancels drawing buffer
maybe this is not wanted? we'll see
2024-08-23 19:46:04 +10:00
psychedelicious
674a3f462f fix(ui): render transformer over objects, fix issue w/ inpaint rect color 2024-08-23 19:46:04 +10:00
psychedelicious
2283186d3a fix(ui): brush preview fill for inpaint/region 2024-08-23 19:46:04 +10:00
psychedelicious
340af1fe50 fix(ui): no objects rendered until vis toggled 2024-08-23 19:46:04 +10:00
psychedelicious
9378656d78 feat(ui): inpaint mask transform 2024-08-23 19:46:04 +10:00
psychedelicious
5f0413e222 fix(ui): layer accidental early set isFirstRender=false 2024-08-23 19:46:04 +10:00
psychedelicious
c3e47515b1 fix(ui): inpaint mask rendering 2024-08-23 19:46:04 +10:00
psychedelicious
5dcef6fa0d feat(ui): wip inpaint mask uses new API 2024-08-23 19:46:04 +10:00
psychedelicious
31ac02cd93 feat(ui): move updatePosition to transformer 2024-08-23 19:46:04 +10:00
psychedelicious
ab16976084 feat(ui): move resetScale to transformer 2024-08-23 19:46:04 +10:00
psychedelicious
8e2b7845e1 tidy(ui): more imperative naming 2024-08-23 19:46:04 +10:00
psychedelicious
3973bce342 tidy(ui): use imperative names for setters in stateapi 2024-08-23 19:46:04 +10:00
psychedelicious
f63847a504 fix(ui): commit drawing buffer on tool change, fixing bbox not calculating 2024-08-23 19:46:04 +10:00
psychedelicious
07e3529948 fix(ui): sync transformer when requesting bbox calc 2024-08-23 19:46:04 +10:00
psychedelicious
03e1c60694 tidy(ui): rename union CanvasEntity -> CanvasEntityState 2024-08-23 19:46:04 +10:00
psychedelicious
d766ed71fc fix(ui): request rect calc immediately on transform, hiding rect 2024-08-23 19:46:04 +10:00
psychedelicious
ae68ef142a feat(ui): move bbox calculation to transformer 2024-08-23 19:46:04 +10:00
psychedelicious
20f55768c4 feat(ui): use set for transformer subscriptions 2024-08-23 19:46:04 +10:00
psychedelicious
c4fad4456e tidy(ui): clean up worker tasks when complete 2024-08-23 19:46:04 +10:00
psychedelicious
78f5ec44ad tidy(ui): remove unused code in CanvasTool 2024-08-23 19:46:04 +10:00
psychedelicious
e14ba86942 feat(ui): use pubsub for isTransforming on manager 2024-08-23 19:46:04 +10:00
psychedelicious
d4e7720f6b docs(ui): update transformer docstrings 2024-08-23 19:46:04 +10:00
psychedelicious
a3f0e7e1cb feat(ui): revised event pubsub, transformer logic split out 2024-08-23 19:46:04 +10:00
psychedelicious
30a696c476 feat(ui): add simple pubsub 2024-08-23 19:46:04 +10:00
psychedelicious
66d6c64e16 feat(ui): document & clean up object renderer 2024-08-23 19:46:04 +10:00
psychedelicious
d15be9b57c feat(ui): split out object renderer 2024-08-23 19:46:04 +10:00
psychedelicious
e5da902fd0 fix(ui): unable to hold shift while transforming to retain ratio 2024-08-23 19:46:04 +10:00
psychedelicious
fc558094c2 tidy(ui): rename canvas stuff 2024-08-23 19:46:04 +10:00
psychedelicious
ad9312e989 tidy(ui): consolidate getLoggingContext builders 2024-08-23 19:46:04 +10:00
psychedelicious
8e1a70b008 fix(ui): align all tools to 1px grid
- Offset brush tool by 0.5px when width is odd, ensuring each stroke edge is exactly on a pixel boundary
- Round the rect tool also
2024-08-23 19:46:04 +10:00
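
The alignment rule as a sketch (helper and type names assumed):

```ts
type Coordinate = { x: number; y: number };

export const alignCoordForTool = (coord: Coordinate, toolWidth: number): Coordinate => {
  // With an odd width, a 0.5px offset puts both stroke edges on integer pixel
  // boundaries; with an even width, the integer grid already does.
  const offset = toolWidth % 2 === 0 ? 0 : 0.5;
  return {
    x: Math.round(coord.x) + offset,
    y: Math.round(coord.y) + offset,
  };
};
```
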
psychedelicious
17f88cd5ad feat(ui): disable image smoothing on layers 2024-08-23 19:46:04 +10:00
psychedelicious
298f1919fa fix(ui): round position when rasterizing layer 2024-08-23 19:46:04 +10:00
psychedelicious
4d20cc11d4 feat(ui): continue modularizing transform 2024-08-23 19:46:04 +10:00
psychedelicious
14f249a2f0 feat(ui): fix a few things that didn't unsubscribe correctly, add helper to manage subscriptions 2024-08-23 19:46:04 +10:00
psychedelicious
b9746a6c2c feat(ui): merge bbox outline into transformer 2024-08-23 19:46:04 +10:00
psychedelicious
94f298a6f4 fix(ui): update parent's pos not transformers 2024-08-23 19:46:04 +10:00
psychedelicious
8d3a8178da feat(ui): merge interaction rect into transformer class 2024-08-23 19:46:04 +10:00
psychedelicious
cad4212fe8 feat(ui): prepare staging area 2024-08-23 19:46:04 +10:00
psychedelicious
cff28dfaa9 feat(ui): typing for logging context 2024-08-23 19:46:04 +10:00
psychedelicious
70d7509fcc feat(ui): remove inheritance of CanvasObject
JS is terrible
2024-08-23 19:46:04 +10:00
psychedelicious
cf83af7a27 feat(ui): split & document transformer logic, iterate on class structures 2024-08-23 19:46:04 +10:00
psychedelicious
5c5a405c0f feat(ui): rotation snap to nearest 45deg when holding shift 2024-08-23 19:46:04 +10:00
psychedelicious
0208e4b232 feat(ui): expose subscribe method for nanostores 2024-08-23 19:46:04 +10:00
psychedelicious
e940754795 tidy(ui): remove layer scaling reducers 2024-08-23 19:46:04 +10:00
psychedelicious
dc9fa1a735 fix(ui): pixel-perfect transforms 2024-08-23 19:46:04 +10:00
psychedelicious
08591fbf6d fix(ui): layer visibility toggle 2024-08-23 19:46:04 +10:00
psychedelicious
74db71bb5d fix(nodes): fix canvas mask erode
it wasn't eroding enough and caused incorrect transparency in result images
2024-08-23 19:46:04 +10:00
psychedelicious
60dbe798a5 fix(ui): do not reset layer on first render 2024-08-23 19:46:04 +10:00
psychedelicious
0e676605fe feat(ui): revised logging and naming setup, fix staging area 2024-08-23 19:46:04 +10:00
psychedelicious
3f781016f6 feat(ui): add repr methods to layer and object classes 2024-08-23 19:46:04 +10:00
psychedelicious
17cd2f6b02 feat(ui): use nanoid(10) instead of uuidv4 for canvas
Shorter ids make them much more readable
2024-08-23 19:46:04 +10:00
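
A sketch of the helper (name and signature assumed):

```ts
import { nanoid } from 'nanoid';

// e.g. 'inpaint_mask:kK9mXls-Qw' instead of
//      'inpaint_mask:550e8400-e29b-41d4-a716-446655440000'
export const getPrefixedId = (prefix: string): string => `${prefix}:${nanoid(10)}`;
```
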
psychedelicious
99102a1b34 build(ui): add nanoid as explicit dep 2024-08-23 19:46:04 +10:00
psychedelicious
8d72e7d9e8 fix(ui): move CanvasImage's konva image to correct object 2024-08-23 19:46:04 +10:00
psychedelicious
0b6b6f97ad fix(ui): prevent flash when applying transform 2024-08-23 19:46:04 +10:00
psychedelicious
fb2f6382b1 build(ui): add eslint rules for async stuff 2024-08-23 19:46:04 +10:00
psychedelicious
1ddea87c35 feat(ui): trying to fix flicker after transform 2024-08-23 19:46:04 +10:00
psychedelicious
ea02323095 feat(ui): transform cleanup 2024-08-23 19:46:04 +10:00
psychedelicious
49733091c7 feat(ui): fix transform when rotated 2024-08-23 19:46:04 +10:00
psychedelicious
cf833fd6e2 fix(ui): use pixel bbox when image is in layer 2024-08-23 19:46:04 +10:00
psychedelicious
ba5cf07ab8 fix(ui): transforming when axes flipped 2024-08-23 19:46:04 +10:00
psychedelicious
d15321a373 feat(ui): hallelujah (???) 2024-08-23 19:46:04 +10:00
psychedelicious
de597a5eb4 feat(ui): add debug button 2024-08-23 19:46:04 +10:00
psychedelicious
e5f5cbdf5c fix(ui): transformer padding 2024-08-23 19:46:04 +10:00
psychedelicious
7d4342bbff feat(ui): wip transform mode 2 2024-08-23 19:46:04 +10:00
psychedelicious
7f8a1d8d20 feat(ui): wip transform mode 2024-08-23 19:46:04 +10:00
psychedelicious
65353ac1e1 feat(ui): wip transform mode 2024-08-23 19:46:04 +10:00
psychedelicious
7f9a31ca4a fix(ui): dnd to canvas broke 2024-08-23 19:46:04 +10:00
psychedelicious
592eb2886c fix(ui): conflicts after rebasing 2024-08-23 19:46:04 +10:00
psychedelicious
c220dd8987 fix(ui): imageDropped listener 2024-08-23 19:46:04 +10:00
psychedelicious
a263beb0d5 wip 2024-08-23 19:46:04 +10:00
psychedelicious
46b7c510eb fix(ui): transform tool seems to be working 2024-08-23 19:46:04 +10:00
psychedelicious
f405e472ea fix(ui): move tool fixes, add transform tool 2024-08-23 19:46:04 +10:00
psychedelicious
7bdfd3ef5f feat(ui): move tool now only moves 2024-08-23 19:46:04 +10:00
psychedelicious
778ee2c679 feat(ui): layer bbox calc in worker 2024-08-23 19:46:04 +10:00
psychedelicious
e70339ff3e feat(ui): tweaked entity & group selection styles 2024-08-23 19:46:04 +10:00
psychedelicious
88c57a9750 feat(ui): canvas entity list headers 2024-08-23 19:46:04 +10:00
psychedelicious
137252128b tidy(ui): CanvasRegion 2024-08-23 19:46:04 +10:00
psychedelicious
d4297b1345 tidy(ui): CanvasRect 2024-08-23 19:46:04 +10:00
psychedelicious
6059bc7b47 tidy(ui): CanvasLayer 2024-08-23 19:46:04 +10:00
psychedelicious
c3ff3eb51f tidy(ui): CanvasInpaintMask 2024-08-23 19:46:04 +10:00
psychedelicious
0b7751c413 tidy(ui): CanvasInitialImage 2024-08-23 19:46:04 +10:00
psychedelicious
d7f1c30624 tidy(ui): CanvasImage 2024-08-23 19:46:04 +10:00
psychedelicious
3f4d7dbeea tidy(ui): CanvasEraserLine 2024-08-23 19:46:04 +10:00
psychedelicious
19b6ae2907 tidy(ui): CanvasControlAdapter 2024-08-23 19:46:04 +10:00
psychedelicious
769f96ff9f tidy(ui): CanvasBrushLine 2024-08-23 19:46:04 +10:00
psychedelicious
fdaf75faa4 tidy(ui): CanvasBbox 2024-08-23 19:46:04 +10:00
psychedelicious
1380bb7ae6 tidy(ui): CanvasBackground 2024-08-23 19:46:04 +10:00
psychedelicious
9483c8cc29 tidy(ui): update canvas classes, organise location of konva nodes 2024-08-23 19:46:04 +10:00
psychedelicious
2ef8a8cf5a feat(ui): add names to all konva objects
Makes troubleshooting much simpler
2024-08-23 19:46:04 +10:00
psychedelicious
d296ec1932 fix(ui): do not await creating new canvas image
If you await this, it causes a race condition where multiple images are created.
2024-08-23 19:46:04 +10:00
psychedelicious
444ad3dae1 feat(ui): use position and dimensions instead of separate x,y,width,height attrs 2024-08-23 19:46:04 +10:00
psychedelicious
8cdcc71378 fix(ui): remove weird rtkq hook wrapper
I do not understand why I did that initially but it doesn't work with TS.
2024-08-23 19:46:04 +10:00
psychedelicious
e8bc06cfd3 feat(ui): rename types size and position to dimensions and coordinate 2024-08-23 19:46:04 +10:00
psychedelicious
67a0a024e9 tidy(ui): hide layer settings by default 2024-08-23 19:46:04 +10:00
psychedelicious
bd2c46c267 fix(ui): layer rendering when starting as disabled 2024-08-23 19:46:04 +10:00
psychedelicious
5acb27f350 feat(invocation): reduce canvas v2 mask & crop mask dilation 2024-08-23 19:46:04 +10:00
psychedelicious
7271b12d0f feat(ui): de-jank staging area and progress images 2024-08-23 19:46:04 +10:00
psychedelicious
4a79467a33 feat(ui): update staging handling to work w/ cropped mask 2024-08-23 19:46:04 +10:00
psychedelicious
5501bb87a3 chore(ui): typegen 2024-08-23 19:46:04 +10:00
psychedelicious
561610e296 feat(app): update CanvasV2MaskAndCropInvocation 2024-08-23 19:46:04 +10:00
psychedelicious
b76609ef18 feat(ui): use new canvas output node 2024-08-23 19:46:04 +10:00
psychedelicious
070b78501b chore(ui): typegen 2024-08-23 19:46:04 +10:00
psychedelicious
50df4f4ab6 feat(app): add CanvasV2MaskAndCropInvocation & CanvasV2MaskAndCropOutput
This handles some masking and cropping that the canvas needs.
2024-08-23 19:46:04 +10:00
psychedelicious
9bbf430125 fix(ui): restore nodes output tracking 2024-08-23 19:46:04 +10:00
psychedelicious
384a90958a feat(ui): rip out document size
barely knew ye
2024-08-23 19:46:04 +10:00
psychedelicious
0e4a25b029 feat(ui): convert initial image to layer when starting canvas session 2024-08-23 19:46:04 +10:00
psychedelicious
4a44e171fd fix(ui): fix layer transparency calculation 2024-08-23 19:46:04 +10:00
psychedelicious
9bc57a6f59 fix(ui): reset initial image when resetting canvas 2024-08-23 19:46:03 +10:00
psychedelicious
4341ed7ab4 fix(ui): reset node executions states when loading workflow 2024-08-23 19:46:03 +10:00
psychedelicious
97ce72c542 fix(ui): entity display list 2024-08-23 19:46:03 +10:00
psychedelicious
a2c78a57a7 feat(ui): img2img working 2024-08-23 19:46:03 +10:00
psychedelicious
044a713dc9 feat(ui): rough out img2img on canvas 2024-08-23 19:46:03 +10:00
psychedelicious
b8479c5fe2 UNDO ME WIP 2024-08-23 19:46:03 +10:00
psychedelicious
4e5d056824 feat(ui): log invocation source id on socket event 2024-08-23 19:46:03 +10:00
psychedelicious
118278b372 feat(ui): restore document size overlay renderer 2024-08-23 19:46:03 +10:00
psychedelicious
8e8c255f3f feat(ui): make document size a rect 2024-08-23 19:46:03 +10:00
psychedelicious
1575bee401 refactor(ui): remove modular imagesize components
This is no longer necessary with canvas v2, and it added a ton of extraneous redux actions when changing the image size. Also renamed it to document size
2024-08-23 19:46:03 +10:00
psychedelicious
249bbfc883 feat(ui): initialState is for generation mode 2024-08-23 19:46:03 +10:00
psychedelicious
3993ae410f feat(ui): split out canvas entity list component 2024-08-23 19:46:03 +10:00
psychedelicious
edf040e3d2 feat(ui): hide bbox button when no canvas session active 2024-08-23 19:46:03 +10:00
psychedelicious
66fd077ee7 tidy(ui): remove unused naming objects/utils
The canvas manager means we don't need to worry about konva node names as we never directly select konva nodes.
2024-08-23 19:46:03 +10:00
psychedelicious
b93462ebb6 feat(ui): split up tool chooser buttons
Prep for distinct toolbars for generation vs canvas modes
2024-08-23 19:46:03 +10:00
psychedelicious
aae60d0cdc feat(ui): add useAssertSingleton util hook
This simple hook asserts that it is only ever called once. Particularly useful for things like hotkeys hooks.
2024-08-23 19:46:03 +10:00
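
A sketch of how such a hook can be implemented (details assumed):

```ts
import { useEffect } from 'react';

const activeKeys = new Set<string>();

export const useAssertSingleton = (key: string) => {
  useEffect(() => {
    if (activeKeys.has(key)) {
      throw new Error(`There should only be one instance of "${key}" mounted at a time`);
    }
    activeKeys.add(key);
    return () => {
      activeKeys.delete(key);
    };
  }, [key]);
};
```
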
psychedelicious
d4da00e607 feat(ui): "stagingArea" -> "session" 2024-08-23 19:46:03 +10:00
psychedelicious
0c539ff00b feat(ui): add reset button to canvas 2024-08-23 19:46:03 +10:00
psychedelicious
5983cbf26c feat(ui): add snapToRect util 2024-08-23 19:46:03 +10:00
psychedelicious
c513d6e3af fix(ui): fiddle with control adapter filters
some jank still
2024-08-23 19:46:03 +10:00
psychedelicious
9d57c0e631 feat(ui): temp disable doc size overlay 2024-08-23 19:46:03 +10:00
psychedelicious
a1923a8966 feat(ui): no animation on layer selection
Felt sluggish
2024-08-23 19:46:03 +10:00
psychedelicious
d988e18731 feat(ui): use canvas as source for control images (wip) 2024-08-23 19:46:03 +10:00
psychedelicious
51008da2dd fix(ui): control adapter translate & scale 2024-08-23 19:46:03 +10:00
psychedelicious
6ccc1f5672 tidy(ui): removed unused state related to non-buffered drawing 2024-08-23 19:46:03 +10:00
psychedelicious
4a556f84e0 feat(ui): control adapter image rendering 2024-08-23 19:46:03 +10:00
psychedelicious
2f21a2220d fix(ui): do not floor bbox calc, it cuts off the last pixels 2024-08-23 19:46:03 +10:00
psychedelicious
91a420b13e feat(ui): fix issue where creating line needs 2 points 2024-08-23 19:46:03 +10:00
psychedelicious
c27da3581b fix(ui): edge cases when holding shift and drawing lines 2024-08-23 19:46:03 +10:00
psychedelicious
961dfbce93 fix(ui): set buffered rect color to full alpha 2024-08-23 19:46:03 +10:00
psychedelicious
df39c825ae fix(ui): handle mouseup correctly 2024-08-23 19:46:03 +10:00
psychedelicious
3f6ee1b7a4 feat(ui): buffered rect drawing 2024-08-23 19:46:03 +10:00
psychedelicious
908e504a6f fix(ui): buffered drawing edge cases 2024-08-23 19:46:03 +10:00
psychedelicious
f2fa41afc5 perf(ui): do not use stage.find 2024-08-23 19:46:03 +10:00
psychedelicious
440ff40ad5 perf(ui): object groups do not listen 2024-08-23 19:46:03 +10:00
psychedelicious
5c15458e15 perf(ui): buffered drawing (wip) 2024-08-23 19:46:03 +10:00
psychedelicious
be5b474f1e tidy(ui): organise files 2024-08-23 19:46:03 +10:00
psychedelicious
cee178c2b6 tidy(ui): organise files 2024-08-23 19:46:03 +10:00
psychedelicious
27657f8b7a tidy(ui): organise files 2024-08-23 19:46:03 +10:00
psychedelicious
e0cde3815a fix(ui): background rendering 2024-08-23 19:46:03 +10:00
psychedelicious
09d0421de4 pkg(ui): remove unused deps react-konva & use-image 2024-08-23 19:46:03 +10:00
psychedelicious
47b94d563c feat(ui): organize konva state and files 2024-08-23 19:46:03 +10:00
psychedelicious
0b5d20c9f0 fix(ui): merge conflicts in image deletion listener 2024-08-23 19:46:03 +10:00
psychedelicious
80e7e1293a fix(ui): region rendering 2024-08-23 19:46:03 +10:00
psychedelicious
3a82b0cbc1 fix(ui): inpaint mask rendering 2024-08-23 19:46:03 +10:00
psychedelicious
a27cbc13b6 fix(ui): staging area rendering 2024-08-23 19:46:03 +10:00
psychedelicious
a8f962eb3f fix(ui): stale selected entity 2024-08-23 19:46:03 +10:00
psychedelicious
7f40d23f19 fix(ui): staging area image offset 2024-08-23 19:46:03 +10:00
psychedelicious
918354cd9d feat(ui): tweak layer ui component 2024-08-23 19:46:03 +10:00
psychedelicious
eef9278ee6 fix(ui): resetting layer resets position 2024-08-23 19:46:03 +10:00
psychedelicious
2c32e2e5c1 feat(ui): updated layer list component styling 2024-08-23 19:46:03 +10:00
psychedelicious
6f05654db5 feat(ui): transformable layers 2024-08-23 19:46:03 +10:00
psychedelicious
1d31b6902f feat(ui): move tool icon is pointer like in other apps 2024-08-23 19:46:03 +10:00
psychedelicious
5a7d615e64 feat(ui): do not floor cursor position 2024-08-23 19:46:03 +10:00
psychedelicious
1dbf9e4ed4 feat(ui): disable gallery hotkeys while staging 2024-08-23 19:46:03 +10:00
psychedelicious
5dcc6ee203 feat(ui): revised canvas progress & staging image handling 2024-08-23 19:46:03 +10:00
psychedelicious
84a4e6ae3f feat(ui): show queue item origin in queue list 2024-08-23 19:46:03 +10:00
psychedelicious
f283bfd68f chore(ui): typegen 2024-08-23 19:46:03 +10:00
psychedelicious
6e5ff7b79c feat(app): add origin to session queue
The origin is an optional field indicating the queue item's origin. For example, "canvas" when the queue item originated from the canvas or "workflows" when the queue item originated from the workflows tab. If omitted, we assume the queue item originated from the API directly.

- Add migration to add the nullable column to the `session_queue` table.
- Update relevant event payloads with the new field.
- Add `cancel_by_origin` method to `session_queue` service and corresponding route. This is required for the canvas to bail out early when staging images.
- Add `origin` to both `SessionQueueItem` and `Batch` - it needs to be provided initially via the batch and then passed onto the queue item.
2024-08-23 19:46:03 +10:00
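
A sketch of the client-side shapes this implies (fields elided, exact types assumed); the key point is that `origin` is nullable so API-originated queue items stay valid:

```ts
type QueueItemOrigin = 'canvas' | 'workflows' | null;

interface Batch {
  batch_id: string;
  origin?: QueueItemOrigin; // provided by the enqueuing client
  // ...other fields elided
}

interface SessionQueueItem {
  item_id: number;
  batch_id: string;
  origin?: QueueItemOrigin; // copied from the batch onto each queue item
  // ...other fields elided
}
```
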
psychedelicious
7c3800d03f fix(ui): denoise start on outpainting 2024-08-23 19:46:03 +10:00
psychedelicious
941db90518 feat(ui): add redux events for queue cleared & batch enqueued socket events 2024-08-23 19:46:03 +10:00
psychedelicious
0d9ecf0f90 feat(ui): canvas staging area works 2024-08-23 19:46:03 +10:00
psychedelicious
9c77023a11 feat(ui): switch to view tool when staging 2024-08-23 19:46:03 +10:00
psychedelicious
b55378c63c tidy(ui): disable preview images on every enqueue 2024-08-23 19:46:03 +10:00
psychedelicious
946c2a49ab feat(ui): rough out save staging image 2024-08-23 19:46:03 +10:00
psychedelicious
b823c31ec6 feat(ui): staging area image visibility toggle 2024-08-23 19:46:03 +10:00
psychedelicious
ec6361e5cb fix(ui): batch building after removing canvas files 2024-08-23 19:46:03 +10:00
psychedelicious
0c26d28278 feat(ui): make Graph class's getMetadataNode public 2024-08-23 19:46:03 +10:00
psychedelicious
c5172d4c5a tidy(ui): remove old canvas graphs 2024-08-23 19:46:03 +10:00
psychedelicious
89de04775e fix(ui): do not select already-selected entity 2024-08-23 19:46:03 +10:00
psychedelicious
b4c3c940b5 tidy(ui): naming things 2024-08-23 19:46:03 +10:00
psychedelicious
aee2aad959 tidy(ui): file organisation 2024-08-23 19:46:03 +10:00
psychedelicious
5ca48a8a5f fix(ui): reset cursor pos when fitting document 2024-08-23 19:46:03 +10:00
psychedelicious
1806aa187b feat(ui): staging area works more better 2024-08-23 19:46:03 +10:00
psychedelicious
7824cb7a1a feat(ui): staging area barely works 2024-08-23 19:46:03 +10:00
psychedelicious
9807a896f4 feat(ui): consolidate konva API 2024-08-23 19:46:03 +10:00
psychedelicious
19866f057d feat(ui): consolidate konva API 2024-08-23 19:46:03 +10:00
psychedelicious
ec4eae3c9c feat(ui): staging area (rendering wip) 2024-08-23 19:46:03 +10:00
psychedelicious
bea0cba038 tidy(ui): type "Dimensions" -> "Size" 2024-08-23 19:46:03 +10:00
psychedelicious
48ee75af9c feat(ui): add updateNode to Graph 2024-08-23 19:46:03 +10:00
psychedelicious
929c593d2f feat(ui): sdxl graphs 2024-08-23 19:46:03 +10:00
psychedelicious
221f32eca7 feat(ui): sd1 outpaint graph 2024-08-23 19:46:03 +10:00
psychedelicious
c3acc15e8b tests(ui): add missing tests for Graph class 2024-08-23 19:46:03 +10:00
psychedelicious
1b653278fc feat(ui): add Graph.getid() util 2024-08-23 19:46:03 +10:00
psychedelicious
cc9062ee46 feat(ui): outpaint graph, organize builder a bit 2024-08-23 19:46:03 +10:00
psychedelicious
91c0feb0ad feat(ui): inpaint sd1 graph 2024-08-23 19:46:03 +10:00
psychedelicious
ae60292ac8 feat(ui): temp disable image caching while testing 2024-08-23 19:46:03 +10:00
psychedelicious
a6ca17b19d feat(ui): txt2img & img2img graphs 2024-08-23 19:46:03 +10:00
psychedelicious
6a4a5ece74 feat(ui): minor change to canvas bbox state type 2024-08-23 19:46:03 +10:00
psychedelicious
9b81860307 feat(ui): simplified konva node to blob/imagedata utils 2024-08-23 19:46:03 +10:00
psychedelicious
5f4a3928d2 feat(ui): node manager getter/setter 2024-08-23 19:46:03 +10:00
psychedelicious
b703884763 feat(ui): generation mode calculation, fudged graphs 2024-08-23 19:46:03 +10:00
psychedelicious
32da98ab8f feat(ui): add utils for getting images from canvas 2024-08-23 19:46:03 +10:00
psychedelicious
bd5a85bf70 feat(ui): even more simplified API - lean on the konva node manager to abstract imperative state API & rendering 2024-08-23 19:46:03 +10:00
psychedelicious
d045f24014 feat(ui): revised docstrings for renderers & simplified api 2024-08-23 19:46:03 +10:00
psychedelicious
2aad3f89c3 feat(ui): inpaint mask UI components 2024-08-23 19:46:03 +10:00
psychedelicious
dd54d19f00 feat(ui): inpaint mask rendering (wip) 2024-08-23 19:46:03 +10:00
psychedelicious
0ed6591d8c fix(ui): models loaded handler 2024-08-23 19:46:03 +10:00
psychedelicious
712e090134 feat(ui): internal state for inpaint mask 2024-08-23 19:46:03 +10:00
psychedelicious
8fc2a1d1cf refactor(ui): divvy up canvas state a bit 2024-08-23 19:46:03 +10:00
psychedelicious
cc15c1593e feat(ui): get region and base layer canvas to blob logic working 2024-08-23 19:46:03 +10:00
psychedelicious
9997d3abda refactor(ui): node manager handles more tedious annoying stuff 2024-08-23 19:46:03 +10:00
psychedelicious
031471e785 feat(ui): use node manager for addRegions 2024-08-23 19:46:03 +10:00
psychedelicious
2e860c6791 feat(ui): persist bbox 2024-08-23 19:46:03 +10:00
psychedelicious
d071a9e17d fix(ui): fix generation graphs 2024-08-23 19:46:03 +10:00
psychedelicious
ed53d33321 feat(ui): add toggle for clipToBbox 2024-08-23 19:46:03 +10:00
psychedelicious
382bc6d978 feat(ui): rename konva node manager 2024-08-23 19:46:03 +10:00
psychedelicious
dab42e258c refactor(ui): create classes to abstract mgmt of konva nodes 2024-08-23 19:46:03 +10:00
psychedelicious
81556410bb tidy(ui): organise renderers 2024-08-23 19:46:03 +10:00
psychedelicious
1f2dfd473c refactor(ui): create entity to konva node map abstraction (wip)
Instead of chaining konva `find` and `findOne` methods, all konva nodes are added to a mapping object. Finding and manipulating them is much simpler.

Done for regions and layers, wip for control adapters.
2024-08-23 19:46:03 +10:00
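
The mapping idea in miniature (class and method names assumed): adapters register by entity id, so lookup is a constant-time `Map` access instead of a Konva tree search:

```ts
export class AdapterRegistry<T extends { id: string }> {
  private adapters = new Map<string, T>();

  add = (adapter: T): void => {
    this.adapters.set(adapter.id, adapter);
  };

  get = (id: string): T | undefined => this.adapters.get(id);

  remove = (id: string): void => {
    this.adapters.delete(id);
  };
}
```
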
psychedelicious
8f0f51be2c perf(ui): fix lag w/ region rendering
Needed to memoize these selectors
2024-08-23 19:46:03 +10:00
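
The memoization fix in sketch form (`createSelector` is real Redux Toolkit API; the state shape is assumed): an unmemoized `filter`/`map` returns a fresh reference on every call, so every subscriber re-renders whenever anything in the store changes:

```ts
import { createSelector } from '@reduxjs/toolkit';

type RootState = { canvasV2: { regions: { id: string; isEnabled: boolean }[] } }; // shape assumed

const selectEnabledRegions = createSelector(
  [(state: RootState) => state.canvasV2.regions],
  // Recomputed only when `state.canvasV2.regions` changes by reference.
  (regions) => regions.filter((region) => region.isEnabled)
);
```
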
psychedelicious
7179e250ed feat(ui): move canvas fill color picker to right 2024-08-23 19:46:03 +10:00
psychedelicious
5bec091fd6 refactor(ui): remove unused ellipse & polygon objects 2024-08-23 19:46:03 +10:00
psychedelicious
2c5896cb0c fix(ui): incorrect rect/brush/eraser positions 2024-08-23 19:46:03 +10:00
psychedelicious
93ff252dc0 refactor(ui): enable global debugging flag 2024-08-23 19:46:03 +10:00
psychedelicious
ac52224455 refactor(ui): disable the preview renderer for now 2024-08-23 19:46:03 +10:00
psychedelicious
4087cad23d tweak(ui): canvas editor layout 2024-08-23 19:46:03 +10:00
psychedelicious
e936b1ff8f perf(ui): memoize layeractionsmenu valid actions 2024-08-23 19:46:03 +10:00
psychedelicious
b7f9c5e221 refactor(ui): decouple konva renderer from react
Subscribe to redux store directly, skipping all the react overhead.

With react in dev mode, a typical frame while using the brush tool on almost-empty canvas is reduced from ~7.5ms to ~3.5ms. All things considered, this still feels slow, but it's a massive improvement.
2024-08-23 19:46:03 +10:00
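
The decoupling in sketch form (`store.subscribe` is real redux API; the `canvasV2` slice and `renderCanvas` are assumed names):

```ts
let prev = store.getState().canvasV2;

const unsubscribe = store.subscribe(() => {
  const next = store.getState().canvasV2;
  if (next === prev) {
    return; // reducers didn't touch this slice; skip the frame entirely
  }
  prev = next;
  renderCanvas(next); // imperative Konva rendering, no React reconciliation
});
```
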
psychedelicious
fc5467150e feat(ui): clip lines to bbox 2024-08-23 19:46:03 +10:00
psychedelicious
4dcab357a0 fix(ui): document fit positioning 2024-08-23 19:46:03 +10:00
psychedelicious
695e464255 feat(ui): document bounds overlay 2024-08-23 19:46:03 +10:00
psychedelicious
9999b60c3b tidy(ui): background layer 2024-08-23 19:46:03 +10:00
psychedelicious
e7df53e260 refactor(ui): use "entity" instead of "data" for canvas 2024-08-23 19:46:03 +10:00
psychedelicious
844590a571 feat(ui): brush size border radius = 1 2024-08-23 19:46:03 +10:00
psychedelicious
9622beaa0d fix(ui): canvas HUD doesn't interrupt tool 2024-08-23 19:46:03 +10:00
psychedelicious
007e2553a8 refactor(ui): split up canvas entity renderers, temp disable preview 2024-08-23 19:46:03 +10:00
psychedelicious
15ad4e3f5e fix(ui): delete all layers button 2024-08-23 19:46:03 +10:00
psychedelicious
be5094fcb4 fix(ui): ignore keyboard shortcuts in input/textarea elements 2024-08-23 19:46:03 +10:00
psychedelicious
a20a861680 fix(ui): canvas entity ids getting clobbered 2024-08-23 19:46:03 +10:00
psychedelicious
396d0a4bc0 fix(ui): move lora followup fixes 2024-08-23 19:46:03 +10:00
psychedelicious
ca9314e077 chore(ui): lint 2024-08-23 19:46:03 +10:00
psychedelicious
4b848798e7 refactor(ui): move loras to canvas slice 2024-08-23 19:46:03 +10:00
psychedelicious
083bcbc77d fix(ui): layer is selected when added 2024-08-23 19:46:03 +10:00
psychedelicious
e8cdc9ae62 feat(ui): r to center & fit stage on document 2024-08-23 19:46:03 +10:00
psychedelicious
8abfa759a4 feat(ui): better HUD 2024-08-23 19:46:03 +10:00
psychedelicious
f6a324b633 fix(ui): always use current brush width when making straight lines 2024-08-23 19:46:03 +10:00
psychedelicious
f083be9391 feat(ui): hold shift w/ brush to draw straight line 2024-08-23 19:46:03 +10:00
psychedelicious
091e2fb751 fix(ui): update bg on canvas resize 2024-08-23 19:46:03 +10:00
psychedelicious
d8539daf1f refactor(ui): better hud 2024-08-23 19:46:03 +10:00
psychedelicious
7ec059f5fa refactor(ui): scaled tool preview border 2024-08-23 19:46:03 +10:00
psychedelicious
4f2ecdefd2 refactor(ui): port remaining canvasV1 rendering logic to V2, remove old code 2024-08-23 19:46:03 +10:00
psychedelicious
e8891a1988 refactor(ui): fix more types 2024-08-23 19:46:03 +10:00
psychedelicious
37d2607f34 refactor(ui): metadata recall (wip)
just enough to let the app run
2024-08-23 19:46:03 +10:00
psychedelicious
0e7b10d3d9 refactor(ui): undo/redo button temp fix 2024-08-23 19:46:03 +10:00
psychedelicious
1f85888638 refactor(ui): fix renderer stuff 2024-08-23 19:46:03 +10:00
psychedelicious
c1f9a129fa refactor(ui): fix misc types 2024-08-23 19:46:03 +10:00
psychedelicious
7ccc5ba398 refactor(ui): fix gallery stuff 2024-08-23 19:46:03 +10:00
psychedelicious
5e1a6ae334 refactor(ui): fix delete image stuff 2024-08-23 19:46:03 +10:00
psychedelicious
3f6cf638f9 refactor(ui): fix useIsReadyToEnqueue for new adapterType field 2024-08-23 19:46:03 +10:00
psychedelicious
46e062a828 refactor(ui): update generation tab graphs 2024-08-23 19:46:02 +10:00
psychedelicious
cc3a0b5d6c refactor(ui): add adapterType to ControlAdapterData 2024-08-23 19:46:02 +10:00
psychedelicious
775479ab7b refactor(ui): update components & logic to use new unified slice (again) 2024-08-23 19:46:02 +10:00
psychedelicious
6b9e0e6d63 refactor(ui): update components & logic to use new unified slice 2024-08-23 19:46:02 +10:00
psychedelicious
83a5c87f5e refactor(ui): merge compositing, params into canvasV2 slice 2024-08-23 19:46:02 +10:00
psychedelicious
84fde74331 refactor(ui): add scaled bbox state 2024-08-23 19:46:02 +10:00
psychedelicious
a517e29b39 refactor(ui): update dnd/image upload 2024-08-23 19:46:02 +10:00
psychedelicious
ccceba7565 refactor(ui): update size/prompts state 2024-08-23 19:46:02 +10:00
psychedelicious
5fc7a03669 refactor(ui): rip out old control adapter implementation 2024-08-23 19:46:02 +10:00
psychedelicious
8864ad1b50 refactor(ui): canvas v2 (wip)
fix entity count select
2024-08-23 19:46:02 +10:00
psychedelicious
f2989885fb refactor(ui): canvas v2 (wip)
delete unused file
2024-08-23 19:46:02 +10:00
psychedelicious
19c66e5c76 refactor(ui): canvas v2 (wip)
merge all canvas state reducers into one big slice (but with the logic split across files so it's not hell)
2024-08-23 19:46:02 +10:00
psychedelicious
8a6690a57c refactor(ui): canvas v2 (wip)
Fix a few more components
2024-08-23 19:46:02 +10:00
psychedelicious
2cc60f253a refactor(ui): canvas v2 (wip)
missed a spot
2024-08-23 19:46:02 +10:00
psychedelicious
cb69872dd3 refactor(ui): canvas v2 (wip)
Redo all UI components for different canvas entity types
2024-08-23 19:46:02 +10:00
psychedelicious
ba66d7c9a6 refactor(ui): canvas v2 (wip) 2024-08-23 19:46:02 +10:00
psychedelicious
9fe727c9f8 refactor(ui): canvas v2 (wip) 2024-08-23 19:46:02 +10:00
psychedelicious
58c656224f refactor(ui): canvas v2 (wip) 2024-08-23 19:46:02 +10:00
psychedelicious
c51253f5f6 refactor(ui): canvas v2 (wip) 2024-08-23 19:46:02 +10:00
psychedelicious
6c1d1588fc feat(ui): bbox tool 2024-08-23 19:46:02 +10:00
psychedelicious
95d6183a6c fix(ui): rect tool preview 2024-08-23 19:46:02 +10:00
psychedelicious
f4c9facdaf fix(ui): multiple stages 2024-08-23 19:46:02 +10:00
psychedelicious
a274e6f165 feat(ui): decouple konva logic from nanostores 2024-08-23 19:46:02 +10:00
psychedelicious
154e3e6f64 feat(ui): store all stage attrs in nanostores 2024-08-23 19:46:02 +10:00
psychedelicious
2c3ac972e5 feat(ui): round stage scale 2024-08-23 19:46:02 +10:00
psychedelicious
2e2e072b0b chore(ui): bump konva 2024-08-23 19:46:02 +10:00
psychedelicious
9d8dd2bf66 feat(ui): generation bbox transformation working
whew
2024-08-23 19:46:02 +10:00
psychedelicious
9047f6db30 feat(ui): wip generation bbox 2024-08-23 19:46:02 +10:00
psychedelicious
5ab345ee63 feat(ui): wip generation bbox 2024-08-23 19:46:02 +10:00
psychedelicious
d8a83acd3a feat(ui): CL zoom and pan, some rendering optimizations 2024-08-23 19:46:02 +10:00
psychedelicious
1f58e5756b Revert "feat(ui): add x,y,scaleX,scaleY,rotation to objects"
This reverts commit 53318b396c967c72326a7e4dea09667b2ab20bdd.
2024-08-23 19:46:02 +10:00
psychedelicious
744acb8f07 feat(ui): layers manage their own bbox 2024-08-23 19:46:02 +10:00
psychedelicious
ae7228d821 docs(ui): konva image object docstrings 2024-08-23 19:46:02 +10:00
psychedelicious
99d8b3a7bf feat(ui): add x,y,scaleX,scaleY,rotation to objects 2024-08-23 19:46:02 +10:00
psychedelicious
3fbe65bbcf fix(ui): show color picker when using rect tool 2024-08-23 19:46:02 +10:00
psychedelicious
f5d879d8e7 feat(ui): image loading fallback for raster layers 2024-08-23 19:46:02 +10:00
psychedelicious
cbc5a4f8e6 feat(ui): bbox calc for raster layers 2024-08-23 19:46:02 +10:00
psychedelicious
37ac7d8ed5 feat(ui): do not fill brush preview when drawing 2024-08-23 19:46:02 +10:00
psychedelicious
bee3fa339d fix(ui): brush spacing handling 2024-08-23 19:46:02 +10:00
psychedelicious
c171fe2b96 fix(ui): jank when starting a shape when not already focused on stage 2024-08-23 19:46:02 +10:00
psychedelicious
1fa8032fdb feat(ui): wip raster layers
I meant to split this up into smaller commits and undo some of it, but I committed afterwards and it's tedious to undo.
2024-08-23 19:46:02 +10:00
psychedelicious
5c0676bcc2 feat(ui): support image objects on raster layers
Just the UI and internal state, not rendering yet.
2024-08-23 19:46:02 +10:00
psychedelicious
cefd9a027c tidy(ui): clean up event handlers
Separate logic for each tool in preparation for ellipse and polygon tools.
2024-08-23 19:46:02 +10:00
psychedelicious
1bce156de1 feat(ui): raster layer reset, object group util 2024-08-23 19:46:02 +10:00
psychedelicious
cd4f63f2fd feat(ui): rect shape preview now has fill 2024-08-23 19:46:02 +10:00
psychedelicious
3c7140cbf3 feat(ui): cancel shape drawing on esc 2024-08-23 19:46:02 +10:00
psychedelicious
b71ba63b5a feat(ui): temp disable history on CL 2024-08-23 19:46:02 +10:00
psychedelicious
d540e2c0d3 feat(ui): raster layer logic
- Deduplicate shared logic
- Split up giant renderers file into separate cohesive files
- Tons of cleanup
- Progress on raster layer functionality
2024-08-23 19:46:02 +10:00
psychedelicious
d79fafc5f5 feat(ui): add raster layer rendering and interaction (WIP) 2024-08-23 19:46:02 +10:00
psychedelicious
9e93fa2092 feat(ui): scaffold out raster layers
Raster layers may have images, lines and shapes. These will replace initial image layers and provide sketching functionality like we have on canvas.
2024-08-23 19:46:02 +10:00
psychedelicious
392e9b4882 refactor(ui): revise types for line and rect objects
- Create separate object types for brush and eraser lines, instead of a single type that has a `tool` field.
- Create new object type for rect shapes.
- Add logic to schemas to migrate old object types to new.
- Update renderers & reducers.
2024-08-23 19:46:02 +10:00
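
A sketch of what such a schema migration can look like with zod (shapes assumed):

```ts
import { z } from 'zod';

const zPoints = z.array(z.number()); // flat [x0, y0, x1, y1, ...]

// Old shape: a single type discriminated by a `tool` field.
const zLineV1 = z.object({ type: z.literal('line'), tool: z.enum(['brush', 'eraser']), points: zPoints });

// New shapes: one object type per tool.
const zBrushLine = z.object({ type: z.literal('brush_line'), points: zPoints });
const zEraserLine = z.object({ type: z.literal('eraser_line'), points: zPoints });

// Accept both; old objects are migrated to the new types on parse.
export const zLine = z.union([zLineV1, zBrushLine, zEraserLine]).transform((line) =>
  line.type === 'line'
    ? { type: line.tool === 'brush' ? ('brush_line' as const) : ('eraser_line' as const), points: line.points }
    : line
);
```
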
911 changed files with 15444 additions and 44200 deletions


```diff
@@ -13,12 +13,6 @@ on:
     tags:
       - 'v*.*.*'
   workflow_dispatch:
-    inputs:
-      push-to-registry:
-        description: Push the built image to the container registry
-        required: false
-        type: boolean
-        default: false
 permissions:
   contents: write
```
```diff
@@ -56,15 +50,16 @@
           df -h
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
      - name: Docker meta
        id: meta
-        uses: docker/metadata-action@v5
+        uses: docker/metadata-action@v4
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          images: |
            ghcr.io/${{ github.repository }}
            ${{ env.DOCKERHUB_REPOSITORY }}
          tags: |
            type=ref,event=branch
            type=ref,event=tag
```
```diff
@@ -77,33 +72,49 @@
            suffix=-${{ matrix.gpu-driver }},onlatest=false
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
+        uses: docker/setup-qemu-action@v2
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v2
         with:
           platforms: ${{ env.PLATFORMS }}
       - name: Login to GitHub Container Registry
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@v3
+        uses: docker/login-action@v2
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
+      # - name: Login to Docker Hub
+      #   if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
+      #   uses: docker/login-action@v2
+      #   with:
+      #     username: ${{ secrets.DOCKERHUB_USERNAME }}
+      #     password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Build container
         timeout-minutes: 40
         id: docker_build
-        uses: docker/build-push-action@v6
+        uses: docker/build-push-action@v4
         with:
           context: .
           file: docker/Dockerfile
           platforms: ${{ env.PLATFORMS }}
-          push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' || github.event.inputs.push-to-registry }}
+          push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
           cache-from: |
             type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
             type=gha,scope=main-${{ matrix.gpu-driver }}
           cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
+      # - name: Docker Hub Description
+      #   if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
+      #   uses: peter-evans/dockerhub-description@v3
+      #   with:
+      #     username: ${{ secrets.DOCKERHUB_USERNAME }}
+      #     password: ${{ secrets.DOCKERHUB_TOKEN }}
+      #     repository: ${{ vars.DOCKERHUB_REPOSITORY }}
+      #     short-description: ${{ github.event.repository.description }}
```


```diff
@@ -60,7 +60,7 @@
           extra-index-url: 'https://download.pytorch.org/whl/cpu'
           github-env: $GITHUB_ENV
         - platform: macos-default
-          os: macOS-14
+          os: macOS-12
           github-env: $GITHUB_ENV
         - platform: windows-cpu
           os: windows-2022
```


```diff
@@ -166,7 +166,7 @@ There are several ways to install IP-Adapter models with an existing InvokeAI in
 1. Through the command line interface launched from the invoke.sh / invoke.bat scripts, option [4] to download models.
 2. Through the Model Manager UI with models from the *Tools* section of [models.invoke.ai](https://models.invoke.ai). To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. **Note** Both the IP-Adapter and the Image Encoder must be installed for IP-Adapter to work. For example, the [SD 1.5 IP-Adapter](https://models.invoke.ai/InvokeAI/ip_adapter_plus_sd15) and [SD1.5 Image Encoder](https://models.invoke.ai/InvokeAI/ip_adapter_sd_image_encoder) must be installed to use IP-Adapter with SD1.5 based models.
-3. **Advanced -- Not recommended ** Manually downloading the IP-Adapter and Image Encoder files - Image Encoder folders should be placed in the `models\any\clip_vision` folders. IP Adapter Model folders should be placed in the relevant `ip-adapter` folder of relevant base model folder of Invoke root directory. For example, for the SDXL IP-Adapter, files should be added to the `model/sdxl/ip_adapter/` folder.
+3. **Advanced -- Not recommended ** Manually downloading the IP-Adapter and Image Encoder files - Image Encoder folders shouid be placed in the `models\any\clip_vision` folders. IP Adapter Model folders should be placed in the relevant `ip-adapter` folder of relevant base model folder of Invoke root directory. For example, for the SDXL IP-Adapter, files should be added to the `model/sdxl/ip_adapter/` folder.
 #### Using IP-Adapter
```


@@ -8,7 +8,7 @@ Invoke uses a SQLite database to store image, workflow, model, and execution dat
We take great care to ensure your data is safe, by utilizing transactions and a database migration system.
Even so, when testing a prerelease version of the app, we strongly suggest either backing up your database or using an in-memory database. This ensures any prerelease hiccups or database schema changes will not cause problems for your data.
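For instance, a minimal sketch of a programmatic backup using Python's built-in sqlite3 online backup API; the database path is an assumption based on a typical default install layout and should be adjusted to your own Invoke root:

```python
import sqlite3
from pathlib import Path

# Back up the Invoke SQLite database with sqlite3's online backup API.
# The source path is an assumption based on a typical default install layout.
src_path = Path("~/invokeai/databases/invokeai.db").expanduser()
dst_path = src_path.with_suffix(".db.bak")

src = sqlite3.connect(src_path)
dst = sqlite3.connect(dst_path)
try:
    src.backup(dst)  # online backup: safe even while the app has the DB open
finally:
    src.close()
    dst.close()

print(f"Backed up {src_path} -> {dst_path}")
```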
## Database Backup

View File

@@ -70,7 +70,7 @@ Each image also has a context menu (ctrl+click / right-click).
- ***Use Prompt*** this will load only the image's text prompts into the left-hand control panel
- ***Use Seed*** this will load only the image's Seed into the left-hand control panel
- ***Use All*** this will load all of the image's generation information into the left-hand control panel
- ***Send to Image to Image*** this will put the image into the left-hand panel in the Image to Image tab and automatically open it
- ***Send to Unified Canvas*** This will **replace whatever is already present** in the Unified Canvas tab with the image and automatically open the tab
- ***Change Board*** this will open a small window that will let you move the image to a different board. This is the same as dragging the image to that board's thumbnail.
- ***Star Image*** this will add the image to the board's list of starred images that are always kept at the top of the gallery. This is the same as clicking on the star on the top right-hand side of the image that appears when you hover over the image with the mouse

View File

@@ -8,7 +8,7 @@ be used to teach an old model new tricks.
## How to Merge Models
Model Merging can be done by navigating to the Model Manager and clicking the "Merge Models" tab. From there, you can select the models and settings you want to use to merge the models.
## Settings

View File

@@ -232,7 +232,7 @@ clarity on the intent and common use cases we expect for utilizing them.
### Compositing / Seam Correction
When doing Inpainting or Outpainting, Invoke needs to merge the pixels generated
by Stable Diffusion into your existing image. This is achieved through compositing - the area around the boundary between your image and the new generation is
automatically blended to produce a seamless output. In a fully automatic
process, a mask is generated to cover the boundary, and then the area of the boundary is
Inpainted.
@@ -242,13 +242,13 @@ help to alter the parameters that control the Compositing. A larger blur and
a blur setting have been noted as producing
consistently strong results. Strength of 0.7 is best for reducing hard seams.
- **Mode** - What part of the image will have the Compositing applied to it.
- **Mask edge** will apply Compositing to the edge of the masked area
- **Mask** will apply Compositing to the entire masked area
- **Unmasked** will apply Compositing to the entire image
- **Steps** - Number of generation steps that will occur during the Coherence Pass, similar to Denoising Steps. Higher step counts will generally have better results.
- **Strength** - How much noise is added for the Coherence Pass, similar to Denoising Strength. A strength of 0 will result in an unchanged image, while a strength of 1 will result in an image with a completely new area as defined by the Mode setting.
- **Blur** - Adjusts the pixel radius of the mask. A larger blur radius will cause the mask to extend past the visibly masked area, while too small of a blur radius will result in a mask that is smaller than the visibly masked area.
- **Blur Method** - The method of blur applied to the masked area.

View File

@@ -296,7 +296,7 @@ finding and fixing three problems that can arise over time:
into the database.
3. The thumbnail for an image is missing, again causing a black
gallery thumbnail. This is fixed by running the "thumbnails"
operation, which simply regenerates and re-registers the missing
thumbnail.

View File

@@ -196,22 +196,6 @@ tips to reduce the problem:
=== "12GB VRAM GPU"
This should be sufficient to generate larger images up to about 1280x1280.
## Checkpoint Models Load Slowly or Use Too Much RAM
The difference between diffusers models (a folder containing multiple
subfolders) and checkpoint models (a file ending with .safetensors or
.ckpt) is that InvokeAI is able to load diffusers models into memory
incrementally, while checkpoint models must be loaded all at
once. With very large models, or systems with limited RAM, you may
experience slowdowns and other memory-related issues when loading
checkpoint models.
To solve this, go to the Model Manager tab (the cube), select the
checkpoint model that's giving you trouble, and press the "Convert"
button in the upper right of your browser window. This will convert the
checkpoint into a diffusers model, after which loading should be
faster and less memory-intensive.
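The same kind of conversion can also be done outside the UI with the diffusers library. This is only a minimal sketch of the idea (not how Invoke's Convert button is implemented internally), and the file paths are placeholders:

```python
from diffusers import StableDiffusionPipeline

# Load a single-file checkpoint and write it back out in the multi-folder
# diffusers layout. Paths are placeholders; adjust for your own files.
pipe = StableDiffusionPipeline.from_single_file("my_model.safetensors")
pipe.save_pretrained("my_model_diffusers")
```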
## Memory Leak (Linux)

View File

@@ -10,7 +10,7 @@ The suggested method is to use `git clone` to clone the repository the node is f
If you'd prefer, you can also just download the whole node folder from the linked repository and add it to the `nodes` folder.
To use a community workflow, download the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor.
- Community Nodes
+ [Adapters-Linked](#adapters-linked-nodes)
@@ -427,7 +427,7 @@ This node works best with SDXL models, especially as the style can be described
5. `Prompt Strength Combine` - Combines weighted prompts for .and()/.blend()
6. `CSV To Index String` - Gets a string from a CSV by index. Includes a Random index option
The following Nodes are now included in v3.2 of Invoke and are no longer in this set of tools.<br>
- `Prompt Join` -> `String Join`
- `Prompt Join Three` -> `String Join Three`
- `Prompt Replace` -> `String Replace`
@@ -456,7 +456,7 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai
### BriaAI Remove Background
**Description**: Implements one click background removal with BriaAI's new version 1.4 model which seems to be producing better results than any other previous background removal tool.
**Node Link:** https://github.com/blessedcoolant/invoke_bria_rmbg

View File

@@ -3,10 +3,8 @@
import io
import pathlib
import shutil
import traceback
from copy import deepcopy
from enum import Enum
from tempfile import TemporaryDirectory
from typing import List, Optional, Type
@@ -19,7 +17,6 @@ from starlette.exceptions import HTTPException
from typing_extensions import Annotated
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.services.config import get_config
from invokeai.app.services.model_images.model_images_common import ModelImageFileNotFoundException
from invokeai.app.services.model_install.model_install_common import ModelInstallJob
from invokeai.app.services.model_records import (
@@ -34,7 +31,6 @@ from invokeai.backend.model_manager.config import (
ModelFormat,
ModelType,
)
from invokeai.backend.model_manager.load.model_cache.model_cache_base import CacheStats
from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch
from invokeai.backend.model_manager.metadata.metadata_base import ModelMetadataWithFiles, UnknownMetadataException
from invokeai.backend.model_manager.search import ModelSearch
@@ -54,13 +50,6 @@ class ModelsList(BaseModel):
model_config = ConfigDict(use_enum_values=True)
class CacheType(str, Enum):
"""Cache type - one of vram or ram."""
RAM = "RAM"
VRAM = "VRAM"
def add_cover_image_to_model_config(config: AnyModelConfig, dependencies: Type[ApiDependencies]) -> AnyModelConfig:
"""Add a cover image URL to a model configuration."""
cover_image = dependencies.invoker.services.model_images.get_url(config.key)
@@ -808,83 +797,3 @@ async def get_starter_models() -> list[StarterModel]:
model.dependencies = missing_deps
return starter_models
@model_manager_router.get(
"/model_cache",
operation_id="get_cache_size",
response_model=float,
summary="Get maximum size of model manager RAM or VRAM cache.",
)
async def get_cache_size(cache_type: CacheType = Query(description="The cache type", default=CacheType.RAM)) -> float:
"""Return the current RAM or VRAM cache size setting (in GB)."""
cache = ApiDependencies.invoker.services.model_manager.load.ram_cache
value = 0.0
if cache_type == CacheType.RAM:
value = cache.max_cache_size
elif cache_type == CacheType.VRAM:
value = cache.max_vram_cache_size
return value
@model_manager_router.put(
"/model_cache",
operation_id="set_cache_size",
response_model=float,
summary="Set maximum size of model manager RAM or VRAM cache, optionally writing new value out to invokeai.yaml config file.",
)
async def set_cache_size(
value: float = Query(description="The new value for the maximum cache size"),
cache_type: CacheType = Query(description="The cache type", default=CacheType.RAM),
persist: bool = Query(description="Write new value out to invokeai.yaml", default=False),
) -> float:
"""Set the current RAM or VRAM cache size setting (in GB). ."""
cache = ApiDependencies.invoker.services.model_manager.load.ram_cache
app_config = get_config()
# Record initial state.
vram_old = app_config.vram
ram_old = app_config.ram
# Prepare target state.
vram_new = vram_old
ram_new = ram_old
if cache_type == CacheType.RAM:
ram_new = value
elif cache_type == CacheType.VRAM:
vram_new = value
else:
raise ValueError(f"Unexpected {cache_type=}.")
config_path = app_config.config_file_path
new_config_path = config_path.with_suffix(".yaml.new")
try:
# Try to apply the target state.
cache.max_vram_cache_size = vram_new
cache.max_cache_size = ram_new
app_config.ram = ram_new
app_config.vram = vram_new
if persist:
app_config.write_file(new_config_path)
shutil.move(new_config_path, config_path)
except Exception as e:
# If there was a failure, restore the initial state.
cache.max_cache_size = ram_old
cache.max_vram_cache_size = vram_old
app_config.ram = ram_old
app_config.vram = vram_old
raise RuntimeError("Failed to update cache size") from e
return value
@model_manager_router.get(
"/stats",
operation_id="get_stats",
response_model=Optional[CacheStats],
summary="Get model manager RAM cache performance statistics.",
)
async def get_stats() -> Optional[CacheStats]:
"""Return performance statistics on the model manager's RAM cache. Will return null if no models have been loaded."""
return ApiDependencies.invoker.services.model_manager.load.ram_cache.stats
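For reference, a hypothetical client interaction with the two cache endpoints shown above (the host, port, and `/api/v2/models` route prefix are assumptions about a local install, not something confirmed by this diff):

```python
import requests

BASE = "http://127.0.0.1:9090/api/v2/models"  # assumed local install and route prefix

# Read the current max RAM cache size (GB), then raise it without persisting to invokeai.yaml.
current_gb = requests.get(f"{BASE}/model_cache", params={"cache_type": "RAM"}).json()
requests.put(f"{BASE}/model_cache", params={"cache_type": "RAM", "value": 12.0, "persist": False})
print(f"RAM cache was {current_gb} GB")
```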

View File

@@ -11,11 +11,10 @@ from invokeai.app.services.session_queue.session_queue_common import (
Batch,
BatchStatus,
CancelByBatchIDsResult,
CancelByDestinationResult,
CancelByOriginResult,
ClearResult,
EnqueueBatchResult,
PruneResult,
SessionQueueCountsByDestination,
SessionQueueItem,
SessionQueueItemDTO,
SessionQueueStatus,
@@ -108,18 +107,16 @@ async def cancel_by_batch_ids(
@session_queue_router.put(
"/{queue_id}/cancel_by_destination",
operation_id="cancel_by_destination",
"/{queue_id}/cancel_by_origin",
operation_id="cancel_by_origin",
responses={200: {"model": CancelByBatchIDsResult}},
)
async def cancel_by_destination(
async def cancel_by_origin(
queue_id: str = Path(description="The queue id to perform this operation on"),
destination: str = Query(description="The destination to cancel all queue items for"),
) -> CancelByDestinationResult:
origin: str = Query(description="The origin to cancel all queue items for"),
) -> CancelByOriginResult:
"""Immediately cancels all queue items with the given origin"""
return ApiDependencies.invoker.services.session_queue.cancel_by_destination(
queue_id=queue_id, destination=destination
)
return ApiDependencies.invoker.services.session_queue.cancel_by_origin(queue_id=queue_id, origin=origin)
@session_queue_router.put(
@@ -243,18 +240,3 @@ async def cancel_queue_item(
"""Deletes a queue item"""
return ApiDependencies.invoker.services.session_queue.cancel_queue_item(item_id)
@session_queue_router.get(
"/{queue_id}/counts_by_destination",
operation_id="counts_by_destination",
responses={200: {"model": SessionQueueCountsByDestination}},
)
async def counts_by_destination(
queue_id: str = Path(description="The queue id to query"),
destination: str = Query(description="The destination to query"),
) -> SessionQueueCountsByDestination:
"""Gets the counts of queue items by destination"""
return ApiDependencies.invoker.services.session_queue.get_counts_by_destination(
queue_id=queue_id, destination=destination
)

View File

@@ -20,6 +20,7 @@ from typing import (
Type,
TypeVar,
Union,
cast,
)
import semver
@@ -60,15 +61,11 @@ class Classification(str, Enum, metaclass=MetaEnum):
- `Stable`: The invocation, including its inputs/outputs and internal logic, is stable. You may build workflows with it, having confidence that they will not break because of a change in this invocation.
- `Beta`: The invocation is not yet stable, but is planned to be stable in the future. Workflows built around this invocation may break, but we are committed to supporting this invocation long-term.
- `Prototype`: The invocation is not yet stable and may be removed from the application at any time. Workflows built around this invocation may break, and we are *not* committed to supporting this invocation.
- `Deprecated`: The invocation is deprecated and may be removed in a future version.
- `Internal`: The invocation is not intended for use by end-users. It may be changed or removed at any time, but is exposed for users to play with.
"""
Stable = "stable"
Beta = "beta"
Prototype = "prototype"
Deprecated = "deprecated"
Internal = "internal"
class UIConfigBase(BaseModel):
@@ -83,7 +80,7 @@ class UIConfigBase(BaseModel):
version: str = Field(
description='The node\'s version. Should be a valid semver string e.g. "1.0.0" or "3.8.13".',
)
node_pack: str = Field(description="The node pack that this node belongs to, will be 'invokeai' for built-in nodes")
node_pack: Optional[str] = Field(default=None, description="Whether or not this is a custom node")
classification: Classification = Field(default=Classification.Stable, description="The node's classification")
model_config = ConfigDict(
@@ -233,16 +230,18 @@ class BaseInvocation(ABC, BaseModel):
@staticmethod
def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseInvocation]) -> None:
"""Adds various UI-facing attributes to the invocation's OpenAPI schema."""
if title := model_class.UIConfig.title:
schema["title"] = title
if tags := model_class.UIConfig.tags:
schema["tags"] = tags
if category := model_class.UIConfig.category:
schema["category"] = category
if node_pack := model_class.UIConfig.node_pack:
schema["node_pack"] = node_pack
schema["classification"] = model_class.UIConfig.classification
schema["version"] = model_class.UIConfig.version
uiconfig = cast(UIConfigBase | None, getattr(model_class, "UIConfig", None))
if uiconfig is not None:
if uiconfig.title is not None:
schema["title"] = uiconfig.title
if uiconfig.tags is not None:
schema["tags"] = uiconfig.tags
if uiconfig.category is not None:
schema["category"] = uiconfig.category
if uiconfig.node_pack is not None:
schema["node_pack"] = uiconfig.node_pack
schema["classification"] = uiconfig.classification
schema["version"] = uiconfig.version
if "required" not in schema or not isinstance(schema["required"], list):
schema["required"] = []
schema["class"] = "invocation"
@@ -313,7 +312,7 @@ class BaseInvocation(ABC, BaseModel):
json_schema_extra={"field_kind": FieldKind.NodeAttribute},
)
UIConfig: ClassVar[UIConfigBase]
UIConfig: ClassVar[Type[UIConfigBase]]
model_config = ConfigDict(
protected_namespaces=(),
@@ -442,25 +441,30 @@ def invocation(
validate_fields(cls.model_fields, invocation_type)
# Add OpenAPI schema extras
uiconfig: dict[str, Any] = {}
uiconfig["title"] = title
uiconfig["tags"] = tags
uiconfig["category"] = category
uiconfig["classification"] = classification
# The node pack is the module name - will be "invokeai" for built-in nodes
uiconfig["node_pack"] = cls.__module__.split(".")[0]
uiconfig_name = cls.__qualname__ + ".UIConfig"
if not hasattr(cls, "UIConfig") or cls.UIConfig.__qualname__ != uiconfig_name:
cls.UIConfig = type(uiconfig_name, (UIConfigBase,), {})
cls.UIConfig.title = title
cls.UIConfig.tags = tags
cls.UIConfig.category = category
cls.UIConfig.classification = classification
# Grab the node pack's name from the module name, if it's a custom node
is_custom_node = cls.__module__.rsplit(".", 1)[0] == "invokeai.app.invocations"
if is_custom_node:
cls.UIConfig.node_pack = cls.__module__.split(".")[0]
else:
cls.UIConfig.node_pack = None
if version is not None:
try:
semver.Version.parse(version)
except ValueError as e:
raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e
uiconfig["version"] = version
cls.UIConfig.version = version
else:
logger.warn(f'No version specified for node "{invocation_type}", using "1.0.0"')
uiconfig["version"] = "1.0.0"
cls.UIConfig = UIConfigBase(**uiconfig)
cls.UIConfig.version = "1.0.0"
if use_cache is not None:
cls.model_fields["use_cache"].default = use_cache

View File

@@ -1,34 +0,0 @@
import cv2
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.util import cv2_to_pil, pil_to_cv2
@invocation(
"canny_edge_detection",
title="Canny Edge Detection",
tags=["controlnet", "canny"],
category="controlnet",
version="1.0.0",
)
class CannyEdgeDetectionInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Geneartes an edge map using a cv2's Canny algorithm."""
image: ImageField = InputField(description="The image to process")
low_threshold: int = InputField(
default=100, ge=0, le=255, description="The low threshold of the Canny pixel gradient (0-255)"
)
high_threshold: int = InputField(
default=200, ge=0, le=255, description="The high threshold of the Canny pixel gradient (0-255)"
)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGB")
np_img = pil_to_cv2(image)
edge_map = cv2.Canny(np_img, self.low_threshold, self.high_threshold)
edge_map_pil = cv2_to_pil(edge_map)
image_dto = context.images.save(image=edge_map_pil)
return ImageOutput.build(image_dto)
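Outside of the node graph, the same edge map can be produced directly with OpenCV; a minimal sketch with placeholder file paths:

```python
import cv2

# Run Canny with the same default thresholds the node above uses (100 / 200).
img = cv2.imread("input.png")
edges = cv2.Canny(img, 100, 200)
cv2.imwrite("edges.png", edges)
```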

View File

@@ -1,41 +0,0 @@
import cv2
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
@invocation(
"color_map",
title="Color Map",
tags=["controlnet"],
category="controlnet",
version="1.0.0",
)
class ColorMapInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Generates a color map from the provided image."""
image: ImageField = InputField(description="The image to process")
tile_size: int = InputField(default=64, ge=1, description=FieldDescriptions.tile_size)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGB")
np_image = pil_to_np(image)
height, width = np_image.shape[:2]
width_tile_size = min(self.tile_size, width)
height_tile_size = min(self.tile_size, height)
color_map = cv2.resize(
np_image,
(width // width_tile_size, height // height_tile_size),
interpolation=cv2.INTER_CUBIC,
)
color_map = cv2.resize(color_map, (width, height), interpolation=cv2.INTER_NEAREST)
color_map_pil = np_to_pil(color_map)
image_dto = context.images.save(image=color_map_pil)
return ImageOutput.build(image_dto)

View File

@@ -19,8 +19,7 @@ from invokeai.app.invocations.model import CLIPField
from invokeai.app.invocations.primitives import ConditioningOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.ti_utils import generate_ti_list
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoRAPatcher
from invokeai.backend.lora import LoRAModelRaw
from invokeai.backend.model_patcher import ModelPatcher
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
BasicConditioningInfo,
@@ -56,6 +55,7 @@ class CompelInvocation(BaseInvocation):
clip: CLIPField = InputField(
title="CLIP",
description=FieldDescriptions.clip,
input=Input.Connection,
)
mask: Optional[TensorField] = InputField(
default=None, description="A mask defining the region that this conditioning prompt applies to."
@@ -82,10 +82,9 @@ class CompelInvocation(BaseInvocation):
# apply all patches while the model is on the target device
text_encoder_info.model_on_device() as (cached_weights, text_encoder),
tokenizer_info as tokenizer,
LoRAPatcher.apply_lora_patches(
model=text_encoder,
patches=_lora_loader(),
prefix="lora_te_",
ModelPatcher.apply_lora_text_encoder(
text_encoder,
loras=_lora_loader(),
cached_weights=cached_weights,
),
# Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
@@ -178,9 +177,9 @@ class SDXLPromptInvocationBase:
# apply all patches while the model is on the target device
text_encoder_info.model_on_device() as (cached_weights, text_encoder),
tokenizer_info as tokenizer,
LoRAPatcher.apply_lora_patches(
ModelPatcher.apply_lora(
text_encoder,
patches=_lora_loader(),
loras=_lora_loader(),
prefix=lora_prefix,
cached_weights=cached_weights,
),

View File

@@ -1,25 +0,0 @@
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.content_shuffle import content_shuffle
@invocation(
"content_shuffle",
title="Content Shuffle",
tags=["controlnet", "normal"],
category="controlnet",
version="1.0.0",
)
class ContentShuffleInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Shuffles the image, similar to a 'liquify' filter."""
image: ImageField = InputField(description="The image to process")
scale_factor: int = InputField(default=256, ge=0, description="The scale factor used for the shuffle")
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGB")
output_image = content_shuffle(input_image=image, scale_factor=self.scale_factor)
image_dto = context.images.save(image=output_image)
return ImageOutput.build(image_dto)

View File

@@ -174,7 +174,6 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithBoard):
tags=["controlnet", "canny"],
category="controlnet",
version="1.3.3",
classification=Classification.Deprecated,
)
class CannyImageProcessorInvocation(ImageProcessorInvocation):
"""Canny edge detection for ControlNet"""
@@ -209,7 +208,6 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation):
tags=["controlnet", "hed", "softedge"],
category="controlnet",
version="1.2.3",
classification=Classification.Deprecated,
)
class HedImageProcessorInvocation(ImageProcessorInvocation):
"""Applies HED edge detection to image"""
@@ -239,7 +237,6 @@ class HedImageProcessorInvocation(ImageProcessorInvocation):
tags=["controlnet", "lineart"],
category="controlnet",
version="1.2.3",
classification=Classification.Deprecated,
)
class LineartImageProcessorInvocation(ImageProcessorInvocation):
"""Applies line art processing to image"""
@@ -262,7 +259,6 @@ class LineartImageProcessorInvocation(ImageProcessorInvocation):
tags=["controlnet", "lineart", "anime"],
category="controlnet",
version="1.2.3",
classification=Classification.Deprecated,
)
class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
"""Applies line art anime processing to image"""
@@ -286,7 +282,6 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
tags=["controlnet", "midas"],
category="controlnet",
version="1.2.4",
classification=Classification.Deprecated,
)
class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
"""Applies Midas depth processing to image"""
@@ -319,7 +314,6 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
tags=["controlnet"],
category="controlnet",
version="1.2.3",
classification=Classification.Deprecated,
)
class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
"""Applies NormalBae processing to image"""
@@ -336,12 +330,7 @@ class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
@invocation(
"mlsd_image_processor",
title="MLSD Processor",
tags=["controlnet", "mlsd"],
category="controlnet",
version="1.2.3",
classification=Classification.Deprecated,
"mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.3"
)
class MlsdImageProcessorInvocation(ImageProcessorInvocation):
"""Applies MLSD processing to image"""
@@ -364,12 +353,7 @@ class MlsdImageProcessorInvocation(ImageProcessorInvocation):
@invocation(
"pidi_image_processor",
title="PIDI Processor",
tags=["controlnet", "pidi"],
category="controlnet",
version="1.2.3",
classification=Classification.Deprecated,
"pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.3"
)
class PidiImageProcessorInvocation(ImageProcessorInvocation):
"""Applies PIDI processing to image"""
@@ -397,7 +381,6 @@ class PidiImageProcessorInvocation(ImageProcessorInvocation):
tags=["controlnet", "contentshuffle"],
category="controlnet",
version="1.2.3",
classification=Classification.Deprecated,
)
class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
"""Applies content shuffle processing to image"""
@@ -428,7 +411,6 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
tags=["controlnet", "zoe", "depth"],
category="controlnet",
version="1.2.3",
classification=Classification.Deprecated,
)
class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
"""Applies Zoe depth processing to image"""
@@ -445,7 +427,6 @@ class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
tags=["controlnet", "mediapipe", "face"],
category="controlnet",
version="1.2.4",
classification=Classification.Deprecated,
)
class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
"""Applies mediapipe face processing to image"""
@@ -473,7 +454,6 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
tags=["controlnet", "leres", "depth"],
category="controlnet",
version="1.2.3",
classification=Classification.Deprecated,
)
class LeresImageProcessorInvocation(ImageProcessorInvocation):
"""Applies leres processing to image"""
@@ -503,7 +483,6 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation):
tags=["controlnet", "tile"],
category="controlnet",
version="1.2.3",
classification=Classification.Deprecated,
)
class TileResamplerProcessorInvocation(ImageProcessorInvocation):
"""Tile resampler processor"""
@@ -544,7 +523,6 @@ class TileResamplerProcessorInvocation(ImageProcessorInvocation):
tags=["controlnet", "segmentanything"],
category="controlnet",
version="1.2.4",
classification=Classification.Deprecated,
)
class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
"""Applies segment anything processing to image"""
@@ -592,7 +570,6 @@ class SamDetectorReproducibleColors(SamDetector):
tags=["controlnet"],
category="controlnet",
version="1.2.3",
classification=Classification.Deprecated,
)
class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
"""Generates a color map from the provided image"""
@@ -632,7 +609,6 @@ DEPTH_ANYTHING_MODELS = {
tags=["controlnet", "depth", "depth anything"],
category="controlnet",
version="1.1.3",
classification=Classification.Deprecated,
)
class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
"""Generates a depth map based on the Depth Anything algorithm"""
@@ -667,7 +643,6 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
tags=["controlnet", "dwpose", "openpose"],
category="controlnet",
version="1.1.1",
classification=Classification.Deprecated,
)
class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
"""Generates an openpose pose from an image using DWPose"""

View File

@@ -28,10 +28,7 @@ from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_t
class GradientMaskOutput(BaseInvocationOutput):
"""Outputs a denoise mask and an image representing the total gradient of the mask."""
denoise_mask: DenoiseMaskField = OutputField(
description="Mask for denoise model run. Values of 0.0 represent the regions to be fully denoised, and 1.0 "
+ "represent the regions to be preserved."
)
denoise_mask: DenoiseMaskField = OutputField(description="Mask for denoise model run")
expanded_mask_area: ImageField = OutputField(
description="Image representing the total gradient area of the mask. For paste-back purposes."
)

View File

@@ -36,8 +36,7 @@ from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoRAPatcher
from invokeai.backend.lora import LoRAModelRaw
from invokeai.backend.model_manager import BaseModelType, ModelVariantType
from invokeai.backend.model_patcher import ModelPatcher
from invokeai.backend.stable_diffusion import PipelineIntermediateState
@@ -186,7 +185,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
)
denoise_mask: Optional[DenoiseMaskField] = InputField(
default=None,
description=FieldDescriptions.denoise_mask,
description=FieldDescriptions.mask,
input=Input.Connection,
ui_order=8,
)
@@ -980,10 +979,9 @@ class DenoiseLatentsInvocation(BaseInvocation):
ModelPatcher.apply_freeu(unet, self.unet.freeu_config),
SeamlessExt.static_patch_model(unet, self.unet.seamless_axes), # FIXME
# Apply the LoRA after unet has been moved to its target device for faster patching.
LoRAPatcher.apply_lora_patches(
model=unet,
patches=_lora_loader(),
prefix="lora_unet_",
ModelPatcher.apply_lora_unet(
unet,
loras=_lora_loader(),
cached_weights=cached_weights,
),
):

View File

@@ -1,45 +0,0 @@
from typing import Literal
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.depth_anything.depth_anything_pipeline import DepthAnythingPipeline
DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small", "small_v2"]
# DepthAnything V2 Small model is licensed under Apache 2.0 but not the base and large models.
DEPTH_ANYTHING_MODELS = {
"large": "LiheYoung/depth-anything-large-hf",
"base": "LiheYoung/depth-anything-base-hf",
"small": "LiheYoung/depth-anything-small-hf",
"small_v2": "depth-anything/Depth-Anything-V2-Small-hf",
}
@invocation(
"depth_anything_depth_estimation",
title="Depth Anything Depth Estimation",
tags=["controlnet", "depth", "depth anything"],
category="controlnet",
version="1.0.0",
)
class DepthAnythingDepthEstimationInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Generates a depth map using a Depth Anything model."""
image: ImageField = InputField(description="The image to process")
model_size: DEPTH_ANYTHING_MODEL_SIZES = InputField(
default="small_v2", description="The size of the depth model to use"
)
def invoke(self, context: InvocationContext) -> ImageOutput:
model_url = DEPTH_ANYTHING_MODELS[self.model_size]
image = context.images.get_pil(self.image.image_name, "RGB")
loaded_model = context.models.load_remote_model(model_url, DepthAnythingPipeline.load_model)
with loaded_model as depth_anything_detector:
assert isinstance(depth_anything_detector, DepthAnythingPipeline)
depth_map = depth_anything_detector.generate_depth(image)
image_dto = context.images.save(image=depth_map)
return ImageOutput.build(image_dto)
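For comparison, roughly the same result can be obtained with the transformers depth-estimation pipeline; the model id below comes from the mapping above, and the file paths are placeholders:

```python
from PIL import Image
from transformers import pipeline

# Depth Anything V2 (small) via the generic depth-estimation pipeline.
depth_estimator = pipeline("depth-estimation", model="depth-anything/Depth-Anything-V2-Small-hf")
result = depth_estimator(Image.open("input.png"))
result["depth"].save("depth.png")  # "depth" is the rendered depth map as a PIL image
```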

View File

@@ -1,50 +0,0 @@
import onnxruntime as ort
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.dw_openpose import DWOpenposeDetector2
@invocation(
"dw_openpose_detection",
title="DW Openpose Detection",
tags=["controlnet", "dwpose", "openpose"],
category="controlnet",
version="1.1.1",
)
class DWOpenposeDetectionInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Generates an openpose pose from an image using DWPose"""
image: ImageField = InputField(description="The image to process")
draw_body: bool = InputField(default=True)
draw_face: bool = InputField(default=False)
draw_hands: bool = InputField(default=False)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGB")
onnx_det_path = context.models.download_and_cache_model(DWOpenposeDetector2.get_model_url_det())
onnx_pose_path = context.models.download_and_cache_model(DWOpenposeDetector2.get_model_url_pose())
loaded_session_det = context.models.load_local_model(
onnx_det_path, DWOpenposeDetector2.create_onnx_inference_session
)
loaded_session_pose = context.models.load_local_model(
onnx_pose_path, DWOpenposeDetector2.create_onnx_inference_session
)
with loaded_session_det as session_det, loaded_session_pose as session_pose:
assert isinstance(session_det, ort.InferenceSession)
assert isinstance(session_pose, ort.InferenceSession)
detector = DWOpenposeDetector2(session_det=session_det, session_pose=session_pose)
detected_image = detector.run(
image,
draw_face=self.draw_face,
draw_hands=self.draw_hands,
draw_body=self.draw_body,
)
image_dto = context.images.save(image=detected_image)
return ImageOutput.build(image_dto)

View File

@@ -40,18 +40,14 @@ class UIType(str, Enum, metaclass=MetaEnum):
# region Model Field Types
MainModel = "MainModelField"
FluxMainModel = "FluxMainModelField"
SDXLMainModel = "SDXLMainModelField"
SDXLRefinerModel = "SDXLRefinerModelField"
ONNXModel = "ONNXModelField"
VAEModel = "VAEModelField"
FluxVAEModel = "FluxVAEModelField"
LoRAModel = "LoRAModelField"
ControlNetModel = "ControlNetModelField"
IPAdapterModel = "IPAdapterModelField"
T2IAdapterModel = "T2IAdapterModelField"
T5EncoderModel = "T5EncoderModelField"
CLIPEmbedModel = "CLIPEmbedModelField"
SpandrelImageToImageModel = "SpandrelImageToImageModelField"
# endregion
@@ -129,17 +125,13 @@ class FieldDescriptions:
negative_cond = "Negative conditioning tensor"
noise = "Noise tensor"
clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
t5_encoder = "T5 tokenizer and text encoder"
clip_embed_model = "CLIP Embed loader"
unet = "UNet (scheduler, LoRAs)"
transformer = "Transformer"
vae = "VAE"
cond = "Conditioning tensor"
controlnet_model = "ControlNet model to load"
vae_model = "VAE model to load"
lora_model = "LoRA model to load"
main_model = "Main model (UNet, VAE, CLIP) to load"
flux_model = "Flux model (Transformer) to load"
sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load"
onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
@@ -181,7 +173,7 @@ class FieldDescriptions:
)
num_1 = "The first number"
num_2 = "The second number"
denoise_mask = "A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved."
mask = "The mask to use for the operation"
board = "The board to save the image to"
image = "The image to process"
tile_size = "Tile size"
@@ -239,12 +231,6 @@ class ColorField(BaseModel):
return (self.r, self.g, self.b, self.a)
class FluxConditioningField(BaseModel):
"""A conditioning tensor primitive value"""
conditioning_name: str = Field(description="The name of conditioning tensor")
class ConditioningField(BaseModel):
"""A conditioning tensor primitive value"""

View File

@@ -1,303 +0,0 @@
from contextlib import ExitStack
from typing import Callable, Iterator, Optional, Tuple
import torch
import torchvision.transforms as tv_transforms
from torchvision.transforms.functional import resize as tv_resize
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import (
DenoiseMaskField,
FieldDescriptions,
FluxConditioningField,
Input,
InputField,
LatentsField,
WithBoard,
WithMetadata,
)
from invokeai.app.invocations.model import TransformerField
from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.denoise import denoise
from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.sampling_utils import (
clip_timestep_schedule,
generate_img_ids,
get_noise,
get_schedule,
pack,
unpack,
)
from invokeai.backend.flux.trajectory_guidance_extension import TrajectoryGuidanceExtension
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoRAPatcher
from invokeai.backend.model_manager.config import ModelFormat
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import FLUXConditioningInfo
from invokeai.backend.util.devices import TorchDevice
@invocation(
"flux_denoise",
title="FLUX Denoise",
tags=["image", "flux"],
category="image",
version="2.1.0",
classification=Classification.Prototype,
)
class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Run denoising process with a FLUX transformer model."""
# If latents is provided, this means we are doing image-to-image.
latents: Optional[LatentsField] = InputField(
default=None,
description=FieldDescriptions.latents,
input=Input.Connection,
)
# denoise_mask is used for image-to-image inpainting. Only the masked region is modified.
denoise_mask: Optional[DenoiseMaskField] = InputField(
default=None,
description=FieldDescriptions.denoise_mask,
input=Input.Connection,
)
denoising_start: float = InputField(
default=0.0,
ge=0,
le=1,
description=FieldDescriptions.denoising_start,
)
denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end)
trajectory_guidance_strength: float = InputField(
default=0.0,
ge=0.0,
le=1.0,
description="Value indicating how strongly to guide the denoising process towards the initial latents (during image-to-image). Range [0, 1]. A value of 0.0 is equivalent to vanilla image-to-image. A value of 1.0 will guide the denoising process very close to the original latents.",
)
transformer: TransformerField = InputField(
description=FieldDescriptions.flux_model,
input=Input.Connection,
title="Transformer",
)
positive_text_conditioning: FluxConditioningField = InputField(
description=FieldDescriptions.positive_cond, input=Input.Connection
)
width: int = InputField(default=1024, multiple_of=16, description="Width of the generated image.")
height: int = InputField(default=1024, multiple_of=16, description="Height of the generated image.")
num_steps: int = InputField(
default=4, description="Number of diffusion steps. Recommended values are schnell: 4, dev: 50."
)
guidance: float = InputField(
default=4.0,
description="The guidance strength. Higher values adhere more strictly to the prompt, and will produce less diverse images. FLUX dev only, ignored for schnell.",
)
seed: int = InputField(default=0, description="Randomness seed for reproducibility.")
@torch.no_grad()
def invoke(self, context: InvocationContext) -> LatentsOutput:
latents = self._run_diffusion(context)
latents = latents.detach().to("cpu")
name = context.tensors.save(tensor=latents)
return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
def _run_diffusion(
self,
context: InvocationContext,
):
inference_dtype = torch.bfloat16
# Load the conditioning data.
cond_data = context.conditioning.load(self.positive_text_conditioning.conditioning_name)
assert len(cond_data.conditionings) == 1
flux_conditioning = cond_data.conditionings[0]
assert isinstance(flux_conditioning, FLUXConditioningInfo)
flux_conditioning = flux_conditioning.to(dtype=inference_dtype)
t5_embeddings = flux_conditioning.t5_embeds
clip_embeddings = flux_conditioning.clip_embeds
# Load the input latents, if provided.
init_latents = context.tensors.load(self.latents.latents_name) if self.latents else None
if init_latents is not None:
init_latents = init_latents.to(device=TorchDevice.choose_torch_device(), dtype=inference_dtype)
# Prepare input noise.
noise = get_noise(
num_samples=1,
height=self.height,
width=self.width,
device=TorchDevice.choose_torch_device(),
dtype=inference_dtype,
seed=self.seed,
)
transformer_info = context.models.load(self.transformer.transformer)
is_schnell = "schnell" in transformer_info.config.config_path
# Calculate the timestep schedule.
image_seq_len = noise.shape[-1] * noise.shape[-2] // 4
timesteps = get_schedule(
num_steps=self.num_steps,
image_seq_len=image_seq_len,
shift=not is_schnell,
)
# Clip the timesteps schedule based on denoising_start and denoising_end.
timesteps = clip_timestep_schedule(timesteps, self.denoising_start, self.denoising_end)
# Prepare input latent image.
if init_latents is not None:
# If init_latents is provided, we are doing image-to-image.
if is_schnell:
context.logger.warning(
"Running image-to-image with a FLUX schnell model. This is not recommended. The results are likely "
"to be poor. Consider using a FLUX dev model instead."
)
# Noise the orig_latents by the appropriate amount for the first timestep.
t_0 = timesteps[0]
x = t_0 * noise + (1.0 - t_0) * init_latents
else:
# init_latents are not provided, so we are not doing image-to-image (i.e. we are starting from pure noise).
if self.denoising_start > 1e-5:
raise ValueError("denoising_start should be 0 when initial latents are not provided.")
x = noise
# If len(timesteps) == 1, then short-circuit. We are just noising the input latents, but not taking any
# denoising steps.
if len(timesteps) <= 1:
return x
inpaint_mask = self._prep_inpaint_mask(context, x)
b, _c, h, w = x.shape
img_ids = generate_img_ids(h=h, w=w, batch_size=b, device=x.device, dtype=x.dtype)
bs, t5_seq_len, _ = t5_embeddings.shape
txt_ids = torch.zeros(bs, t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device())
# Pack all latent tensors.
init_latents = pack(init_latents) if init_latents is not None else None
inpaint_mask = pack(inpaint_mask) if inpaint_mask is not None else None
noise = pack(noise)
x = pack(x)
# Now that we have 'packed' the latent tensors, verify that we calculated the image_seq_len correctly.
assert image_seq_len == x.shape[1]
# Prepare trajectory guidance extension.
traj_guidance_extension: TrajectoryGuidanceExtension | None = None
if init_latents is not None:
traj_guidance_extension = TrajectoryGuidanceExtension(
init_latents=init_latents,
inpaint_mask=inpaint_mask,
trajectory_guidance_strength=self.trajectory_guidance_strength,
)
with (
transformer_info.model_on_device() as (cached_weights, transformer),
ExitStack() as exit_stack,
):
assert isinstance(transformer, Flux)
config = transformer_info.config
assert config is not None
# Apply LoRA models to the transformer.
# Note: We apply the LoRA after the transformer has been moved to its target device for faster patching.
if config.format in [ModelFormat.Checkpoint]:
# The model is non-quantized, so we can apply the LoRA weights directly into the model.
exit_stack.enter_context(
LoRAPatcher.apply_lora_patches(
model=transformer,
patches=self._lora_iterator(context),
prefix="",
cached_weights=cached_weights,
)
)
elif config.format in [ModelFormat.BnbQuantizedLlmInt8b, ModelFormat.BnbQuantizednf4b]:
# The model is quantized, so apply the LoRA weights as sidecar layers. This results in slower inference,
# than directly patching the weights, but is agnostic to the quantization format.
exit_stack.enter_context(
LoRAPatcher.apply_lora_sidecar_patches(
model=transformer,
patches=self._lora_iterator(context),
prefix="",
dtype=inference_dtype,
)
)
else:
raise ValueError(f"Unsupported model format: {config.format}")
x = denoise(
model=transformer,
img=x,
img_ids=img_ids,
txt=t5_embeddings,
txt_ids=txt_ids,
vec=clip_embeddings,
timesteps=timesteps,
step_callback=self._build_step_callback(context),
guidance=self.guidance,
traj_guidance_extension=traj_guidance_extension,
)
x = unpack(x.float(), self.height, self.width)
return x
def _prep_inpaint_mask(self, context: InvocationContext, latents: torch.Tensor) -> torch.Tensor | None:
"""Prepare the inpaint mask.
- Loads the mask
- Resizes if necessary
- Casts to same device/dtype as latents
- Expands mask to the same shape as latents so that they line up after 'packing'
Args:
context (InvocationContext): The invocation context, for loading the inpaint mask.
latents (torch.Tensor): A latent image tensor. In 'unpacked' format. Used to determine the target shape,
device, and dtype for the inpaint mask.
Returns:
torch.Tensor | None: Inpaint mask. Values of 0.0 represent the regions to be fully denoised, and 1.0
represent the regions to be preserved.
"""
if self.denoise_mask is None:
return None
mask = context.tensors.load(self.denoise_mask.mask_name)
# The input denoise_mask contains values in [0, 1], where 0.0 represents the regions to be fully denoised, and
# 1.0 represents the regions to be preserved.
# We invert the mask so that the regions to be preserved are 0.0 and the regions to be denoised are 1.0.
mask = 1.0 - mask
_, _, latent_height, latent_width = latents.shape
mask = tv_resize(
img=mask,
size=[latent_height, latent_width],
interpolation=tv_transforms.InterpolationMode.BILINEAR,
antialias=False,
)
mask = mask.to(device=latents.device, dtype=latents.dtype)
# Expand the inpaint mask to the same shape as `latents` so that when we 'pack' `mask` it lines up with
# `latents`.
return mask.expand_as(latents)
def _lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[LoRAModelRaw, float]]:
for lora in self.transformer.loras:
lora_info = context.models.load(lora.lora)
assert isinstance(lora_info.model, LoRAModelRaw)
yield (lora_info.model, lora.weight)
del lora_info
def _build_step_callback(self, context: InvocationContext) -> Callable[[PipelineIntermediateState], None]:
def step_callback(state: PipelineIntermediateState) -> None:
state.latents = unpack(state.latents.float(), self.height, self.width).squeeze()
context.util.flux_step_callback(state)
return step_callback

View File

@@ -1,109 +0,0 @@
from typing import Optional
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
Classification,
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.model import LoRAField, ModelIdentifierField, TransformerField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.config import BaseModelType
@invocation_output("flux_lora_loader_output")
class FluxLoRALoaderOutput(BaseInvocationOutput):
"""FLUX LoRA Loader Output"""
transformer: Optional[TransformerField] = OutputField(
default=None, description=FieldDescriptions.transformer, title="FLUX Transformer"
)
@invocation(
"flux_lora_loader",
title="FLUX LoRA",
tags=["lora", "model", "flux"],
category="model",
version="1.0.0",
classification=Classification.Prototype,
)
class FluxLoRALoaderInvocation(BaseInvocation):
"""Apply a LoRA model to a FLUX transformer."""
lora: ModelIdentifierField = InputField(
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
)
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
transformer: TransformerField = InputField(
description=FieldDescriptions.transformer,
input=Input.Connection,
title="FLUX Transformer",
)
def invoke(self, context: InvocationContext) -> FluxLoRALoaderOutput:
lora_key = self.lora.key
if not context.models.exists(lora_key):
raise ValueError(f"Unknown lora: {lora_key}!")
if any(lora.lora.key == lora_key for lora in self.transformer.loras):
raise ValueError(f'LoRA "{lora_key}" already applied to transformer.')
transformer = self.transformer.model_copy(deep=True)
transformer.loras.append(
LoRAField(
lora=self.lora,
weight=self.weight,
)
)
return FluxLoRALoaderOutput(transformer=transformer)
@invocation(
"flux_lora_collection_loader",
title="FLUX LoRA Collection Loader",
tags=["lora", "model", "flux"],
category="model",
version="1.0.0",
classification=Classification.Prototype,
)
class FLUXLoRACollectionLoader(BaseInvocation):
"""Applies a collection of LoRAs to a FLUX transformer."""
loras: LoRAField | list[LoRAField] = InputField(
description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
)
transformer: Optional[TransformerField] = InputField(
default=None,
description=FieldDescriptions.transformer,
input=Input.Connection,
title="Transformer",
)
def invoke(self, context: InvocationContext) -> FluxLoRALoaderOutput:
output = FluxLoRALoaderOutput()
loras = self.loras if isinstance(self.loras, list) else [self.loras]
added_loras: list[str] = []
for lora in loras:
if lora.lora.key in added_loras:
continue
if not context.models.exists(lora.lora.key):
raise Exception(f"Unknown lora: {lora.lora.key}!")
assert lora.lora.base is BaseModelType.Flux
added_loras.append(lora.lora.key)
if self.transformer is not None:
if output.transformer is None:
output.transformer = self.transformer.model_copy(deep=True)
output.transformer.loras.append(lora)
return output

View File

@@ -1,92 +0,0 @@
from typing import Literal
import torch
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
from invokeai.app.invocations.model import CLIPField, T5EncoderField
from invokeai.app.invocations.primitives import FluxConditioningOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.conditioner import HFEncoder
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, FLUXConditioningInfo
@invocation(
"flux_text_encoder",
title="FLUX Text Encoding",
tags=["prompt", "conditioning", "flux"],
category="conditioning",
version="1.0.0",
classification=Classification.Prototype,
)
class FluxTextEncoderInvocation(BaseInvocation):
"""Encodes and preps a prompt for a flux image."""
clip: CLIPField = InputField(
title="CLIP",
description=FieldDescriptions.clip,
input=Input.Connection,
)
t5_encoder: T5EncoderField = InputField(
title="T5Encoder",
description=FieldDescriptions.t5_encoder,
input=Input.Connection,
)
t5_max_seq_len: Literal[256, 512] = InputField(
description="Max sequence length for the T5 encoder. Expected to be 256 for FLUX schnell models and 512 for FLUX dev models."
)
prompt: str = InputField(description="Text prompt to encode.")
@torch.no_grad()
def invoke(self, context: InvocationContext) -> FluxConditioningOutput:
# Note: The T5 and CLIP encoding are done in separate functions to ensure that all model references are locally
# scoped. This ensures that the T5 model can be freed and gc'd before loading the CLIP model (if necessary).
t5_embeddings = self._t5_encode(context)
clip_embeddings = self._clip_encode(context)
conditioning_data = ConditioningFieldData(
conditionings=[FLUXConditioningInfo(clip_embeds=clip_embeddings, t5_embeds=t5_embeddings)]
)
conditioning_name = context.conditioning.save(conditioning_data)
return FluxConditioningOutput.build(conditioning_name)
def _t5_encode(self, context: InvocationContext) -> torch.Tensor:
t5_tokenizer_info = context.models.load(self.t5_encoder.tokenizer)
t5_text_encoder_info = context.models.load(self.t5_encoder.text_encoder)
prompt = [self.prompt]
with (
t5_text_encoder_info as t5_text_encoder,
t5_tokenizer_info as t5_tokenizer,
):
assert isinstance(t5_text_encoder, T5EncoderModel)
assert isinstance(t5_tokenizer, T5Tokenizer)
t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False, self.t5_max_seq_len)
prompt_embeds = t5_encoder(prompt)
assert isinstance(prompt_embeds, torch.Tensor)
return prompt_embeds
def _clip_encode(self, context: InvocationContext) -> torch.Tensor:
clip_tokenizer_info = context.models.load(self.clip.tokenizer)
clip_text_encoder_info = context.models.load(self.clip.text_encoder)
prompt = [self.prompt]
with (
clip_text_encoder_info as clip_text_encoder,
clip_tokenizer_info as clip_tokenizer,
):
assert isinstance(clip_text_encoder, CLIPTextModel)
assert isinstance(clip_tokenizer, CLIPTokenizer)
clip_encoder = HFEncoder(clip_text_encoder, clip_tokenizer, True, 77)
pooled_prompt_embeds = clip_encoder(prompt)
assert isinstance(pooled_prompt_embeds, torch.Tensor)
return pooled_prompt_embeds

View File

@@ -1,60 +0,0 @@
import torch
from einops import rearrange
from PIL import Image
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import (
FieldDescriptions,
Input,
InputField,
LatentsField,
WithBoard,
WithMetadata,
)
from invokeai.app.invocations.model import VAEField
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.model_manager.load.load_base import LoadedModel
from invokeai.backend.util.devices import TorchDevice
@invocation(
"flux_vae_decode",
title="FLUX Latents to Image",
tags=["latents", "image", "vae", "l2i", "flux"],
category="latents",
version="1.0.0",
)
class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Generates an image from latents."""
latents: LatentsField = InputField(
description=FieldDescriptions.latents,
input=Input.Connection,
)
vae: VAEField = InputField(
description=FieldDescriptions.vae,
input=Input.Connection,
)
def _vae_decode(self, vae_info: LoadedModel, latents: torch.Tensor) -> Image.Image:
with vae_info as vae:
assert isinstance(vae, AutoEncoder)
latents = latents.to(device=TorchDevice.choose_torch_device(), dtype=TorchDevice.choose_torch_dtype())
img = vae.decode(latents)
img = img.clamp(-1, 1)
img = rearrange(img[0], "c h w -> h w c") # noqa: F821
img_pil = Image.fromarray((127.5 * (img + 1.0)).byte().cpu().numpy())
return img_pil
@torch.no_grad()
def invoke(self, context: InvocationContext) -> ImageOutput:
latents = context.tensors.load(self.latents.latents_name)
vae_info = context.models.load(self.vae.vae)
image = self._vae_decode(vae_info=vae_info, latents=latents)
TorchDevice.empty_cache()
image_dto = context.images.save(image=image)
return ImageOutput.build(image_dto)

View File

@@ -1,67 +0,0 @@
import einops
import torch
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import (
FieldDescriptions,
ImageField,
Input,
InputField,
)
from invokeai.app.invocations.model import VAEField
from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.model_manager import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
from invokeai.backend.util.devices import TorchDevice
@invocation(
"flux_vae_encode",
title="FLUX Image to Latents",
tags=["latents", "image", "vae", "i2l", "flux"],
category="latents",
version="1.0.0",
)
class FluxVaeEncodeInvocation(BaseInvocation):
"""Encodes an image into latents."""
image: ImageField = InputField(
description="The image to encode.",
)
vae: VAEField = InputField(
description=FieldDescriptions.vae,
input=Input.Connection,
)
@staticmethod
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
# TODO(ryand): Expose seed parameter at the invocation level.
# TODO(ryand): Write a util function for generating random tensors that is consistent across devices / dtypes.
# There's a starting point in get_noise(...), but it needs to be extracted and generalized. This function
# should be used for VAE encode sampling.
generator = torch.Generator(device=TorchDevice.choose_torch_device()).manual_seed(0)
with vae_info as vae:
assert isinstance(vae, AutoEncoder)
image_tensor = image_tensor.to(
device=TorchDevice.choose_torch_device(), dtype=TorchDevice.choose_torch_dtype()
)
latents = vae.encode(image_tensor, sample=True, generator=generator)
return latents
@torch.no_grad()
def invoke(self, context: InvocationContext) -> LatentsOutput:
image = context.images.get_pil(self.image.image_name)
vae_info = context.models.load(self.vae.vae)
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
if image_tensor.dim() == 3:
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
latents = latents.to("cpu")
name = context.tensors.save(tensor=latents)
return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
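
A small sketch of why vae_encode seeds its generator: sampling via the reparameterization trick (z = mean + std * eps) is deterministic when eps comes from a manually seeded generator. The AutoEncoder's internals are assumed here, not taken from this diff.

import torch

def sample_latents(mean: torch.Tensor, logvar: torch.Tensor, seed: int = 0) -> torch.Tensor:
    generator = torch.Generator().manual_seed(seed)
    eps = torch.randn(mean.shape, generator=generator)
    return mean + torch.exp(0.5 * logvar) * eps  # z = mean + std * eps

mean = torch.zeros(1, 16, 8, 8)
logvar = torch.zeros(1, 16, 8, 8)
assert torch.equal(sample_latents(mean, logvar), sample_latents(mean, logvar))  # same seed -> same latents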

View File

@@ -1,33 +0,0 @@
from builtins import bool
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.hed import ControlNetHED_Apache2, HEDEdgeDetector
@invocation(
"hed_edge_detection",
title="HED Edge Detection",
tags=["controlnet", "hed", "softedge"],
category="controlnet",
version="1.0.0",
)
class HEDEdgeDetectionInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Geneartes an edge map using the HED (softedge) model."""
image: ImageField = InputField(description="The image to process")
scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGB")
loaded_model = context.models.load_remote_model(HEDEdgeDetector.get_model_url(), HEDEdgeDetector.load_model)
with loaded_model as model:
assert isinstance(model, ControlNetHED_Apache2)
hed_processor = HEDEdgeDetector(model)
edge_map = hed_processor.run(image=image, scribble=self.scribble)
image_dto = context.images.save(image=edge_map)
return ImageOutput.build(image_dto)
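
The HED, lineart, MLSD, normal-map, and PiDiNet invocations in this diff all share one shape: load a remote model, hold it inside a with block, wrap it in a small processor, and save the result. A schematic sketch of that pattern with stand-in classes, since InvocationContext itself is not part of this diff:

from contextlib import contextmanager

class StubModel:
    """Stand-in for a loaded detector network such as ControlNetHED_Apache2."""

@contextmanager
def load_remote_model(url: str):
    # Stand-in for context.models.load_remote_model: the real object downloads the
    # weights if needed and keeps the model resident for the duration of the block.
    model = StubModel()
    try:
        yield model
    finally:
        pass  # the real loader releases/offloads the model here

with load_remote_model("https://example.com/hed.pt") as model:
    assert isinstance(model, StubModel)  # mirrors the isinstance guard in each invocation
    # detector = HEDEdgeDetector(model); edge_map = detector.run(image=image, ...)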

View File

@@ -10,6 +10,7 @@ from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
Classification,
invocation,
invocation_output,
)
from invokeai.app.invocations.constants import IMAGE_MODES
from invokeai.app.invocations.fields import (
@@ -17,6 +18,7 @@ from invokeai.app.invocations.fields import (
FieldDescriptions,
ImageField,
InputField,
OutputField,
WithBoard,
WithMetadata,
)
@@ -1013,13 +1015,19 @@ class MaskFromIDInvocation(BaseInvocation, WithMetadata, WithBoard):
return ImageOutput.build(image_dto)
@invocation_output("canvas_v2_mask_and_crop_output")
class CanvasV2MaskAndCropOutput(ImageOutput):
offset_x: int = OutputField(description="The x offset of the image, after cropping")
offset_y: int = OutputField(description="The y offset of the image, after cropping")
@invocation(
"canvas_v2_mask_and_crop",
title="Canvas V2 Mask and Crop",
tags=["image", "mask", "id"],
category="image",
version="1.0.0",
classification=Classification.Internal,
classification=Classification.Prototype,
)
class CanvasV2MaskAndCropInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Handles Canvas V2 image output masking and cropping"""
@@ -1041,7 +1049,7 @@ class CanvasV2MaskAndCropInvocation(BaseInvocation, WithMetadata, WithBoard):
mask = dilated_mask.filter(ImageFilter.GaussianBlur(self.mask_blur))
return ImageOps.invert(mask.convert("L"))
def invoke(self, context: InvocationContext) -> ImageOutput:
def invoke(self, context: InvocationContext) -> CanvasV2MaskAndCropOutput:
mask = self._prepare_mask(context.images.get_pil(self.mask.image_name))
if self.source_image:
@@ -1054,4 +1062,13 @@ class CanvasV2MaskAndCropInvocation(BaseInvocation, WithMetadata, WithBoard):
generated_image.putalpha(mask)
image_dto = context.images.save(image=generated_image)
return ImageOutput.build(image_dto)
# bbox = image.getbbox()
# image = image.crop(bbox)
return CanvasV2MaskAndCropOutput(
image=ImageField(image_name=image_dto.image_name),
offset_x=0,
offset_y=0,
width=image_dto.width,
height=image_dto.height,
)
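
A standalone sketch of the dilate-blur-invert mask preparation that _prepare_mask performs before the alpha is applied; the kernel size and blur radius here are illustrative, not the invocation's actual defaults.

from PIL import Image, ImageFilter, ImageOps

mask = Image.new("L", (64, 64), 0)
mask.paste(255, (16, 16, 48, 48))                      # region to be regenerated
dilated = mask.filter(ImageFilter.MaxFilter(9))        # grow the mask outward
blurred = dilated.filter(ImageFilter.GaussianBlur(4))  # feather the edge
alpha = ImageOps.invert(blurred.convert("L"))          # invert, as in _prepare_mask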

View File

@@ -1,34 +0,0 @@
from builtins import bool
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.lineart import Generator, LineartEdgeDetector
@invocation(
"lineart_edge_detection",
title="Lineart Edge Detection",
tags=["controlnet", "lineart"],
category="controlnet",
version="1.0.0",
)
class LineartEdgeDetectionInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Generates an edge map using the Lineart model."""
image: ImageField = InputField(description="The image to process")
coarse: bool = InputField(default=False, description="Whether to use coarse mode")
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGB")
model_url = LineartEdgeDetector.get_model_url(self.coarse)
loaded_model = context.models.load_remote_model(model_url, LineartEdgeDetector.load_model)
with loaded_model as model:
assert isinstance(model, Generator)
detector = LineartEdgeDetector(model)
edge_map = detector.run(image=image)
image_dto = context.images.save(image=edge_map)
return ImageOutput.build(image_dto)

View File

@@ -1,31 +0,0 @@
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.lineart_anime import LineartAnimeEdgeDetector, UnetGenerator
@invocation(
"lineart_anime_edge_detection",
title="Lineart Anime Edge Detection",
tags=["controlnet", "lineart"],
category="controlnet",
version="1.0.0",
)
class LineartAnimeEdgeDetectionInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Geneartes an edge map using the Lineart model."""
image: ImageField = InputField(description="The image to process")
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGB")
model_url = LineartAnimeEdgeDetector.get_model_url()
loaded_model = context.models.load_remote_model(model_url, LineartAnimeEdgeDetector.load_model)
with loaded_model as model:
assert isinstance(model, UnetGenerator)
detector = LineartAnimeEdgeDetector(model)
edge_map = detector.run(image=image)
image_dto = context.images.save(image=edge_map)
return ImageOutput.build(image_dto)

View File

@@ -126,7 +126,7 @@ class ImageMaskToTensorInvocation(BaseInvocation, WithMetadata):
title="Tensor Mask to Image",
tags=["mask"],
category="mask",
version="1.1.0",
version="1.0.0",
)
class MaskTensorToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Convert a mask tensor to an image."""
@@ -135,11 +135,6 @@ class MaskTensorToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
def invoke(self, context: InvocationContext) -> ImageOutput:
mask = context.tensors.load(self.mask.tensor_name)
# Squeeze the channel dimension if it exists.
if mask.dim() == 3:
mask = mask.squeeze(0)
# Ensure that the mask is binary.
if mask.dtype != torch.bool:
mask = mask > 0.5
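
A self-contained version of the squeeze-and-binarize logic above, extended with the tensor-to-image conversion the invocation presumably performs next (the hunk is truncated, so that last step is an assumption):

import torch
from PIL import Image

mask = torch.rand(1, 32, 32)     # (c, h, w): channel dim present
if mask.dim() == 3:
    mask = mask.squeeze(0)       # drop the channel dimension
if mask.dtype != torch.bool:
    mask = mask > 0.5            # ensure the mask is binary
image = Image.fromarray(mask.to(torch.uint8).numpy() * 255, mode="L")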

View File

@@ -1,26 +0,0 @@
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.mediapipe_face import detect_faces
@invocation(
"mediapipe_face_detection",
title="MediaPipe Face Detection",
tags=["controlnet", "face"],
category="controlnet",
version="1.0.0",
)
class MediaPipeFaceDetectionInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Detects faces using MediaPipe."""
image: ImageField = InputField(description="The image to process")
max_faces: int = InputField(default=1, ge=1, description="Maximum number of faces to detect")
min_confidence: float = InputField(default=0.5, ge=0, le=1, description="Minimum confidence for face detection")
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGB")
detected_faces = detect_faces(image=image, max_faces=self.max_faces, min_confidence=self.min_confidence)
image_dto = context.images.save(image=detected_faces)
return ImageOutput.build(image_dto)

View File

@@ -129,18 +129,7 @@ class MergeMetadataInvocation(BaseInvocation):
GENERATION_MODES = Literal[
"txt2img",
"img2img",
"inpaint",
"outpaint",
"sdxl_txt2img",
"sdxl_img2img",
"sdxl_inpaint",
"sdxl_outpaint",
"flux_txt2img",
"flux_img2img",
"flux_inpaint",
"flux_outpaint",
"txt2img", "img2img", "inpaint", "outpaint", "sdxl_txt2img", "sdxl_img2img", "sdxl_inpaint", "sdxl_outpaint"
]

View File

@@ -1,39 +0,0 @@
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.mlsd import MLSDDetector
from invokeai.backend.image_util.mlsd.models.mbv2_mlsd_large import MobileV2_MLSD_Large
@invocation(
"mlsd_detection",
title="MLSD Detection",
tags=["controlnet", "mlsd", "edge"],
category="controlnet",
version="1.0.0",
)
class MLSDDetectionInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Generates an line segment map using MLSD."""
image: ImageField = InputField(description="The image to process")
score_threshold: float = InputField(
default=0.1, ge=0, description="The threshold used to score points when determining line segments"
)
distance_threshold: float = InputField(
default=20.0,
ge=0,
description="Threshold for including a line segment - lines shorter than this distance will be discarded",
)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGB")
loaded_model = context.models.load_remote_model(MLSDDetector.get_model_url(), MLSDDetector.load_model)
with loaded_model as model:
assert isinstance(model, MobileV2_MLSD_Large)
detector = MLSDDetector(model)
edge_map = detector.run(image, self.score_threshold, self.distance_threshold)
image_dto = context.images.save(image=edge_map)
return ImageOutput.build(image_dto)

View File

@@ -1,5 +1,5 @@
import copy
from typing import List, Literal, Optional
from typing import List, Optional
from pydantic import BaseModel, Field
@@ -13,14 +13,7 @@ from invokeai.app.invocations.baseinvocation import (
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.shared.models import FreeUConfig
from invokeai.backend.flux.util import max_seq_lengths
from invokeai.backend.model_manager.config import (
AnyModelConfig,
BaseModelType,
CheckpointConfigBase,
ModelType,
SubModelType,
)
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType, SubModelType
class ModelIdentifierField(BaseModel):
@@ -67,16 +60,6 @@ class CLIPField(BaseModel):
loras: List[LoRAField] = Field(description="LoRAs to apply on model loading")
class TransformerField(BaseModel):
transformer: ModelIdentifierField = Field(description="Info to load Transformer submodel")
loras: List[LoRAField] = Field(description="LoRAs to apply on model loading")
class T5EncoderField(BaseModel):
tokenizer: ModelIdentifierField = Field(description="Info to load tokenizer submodel")
text_encoder: ModelIdentifierField = Field(description="Info to load text_encoder submodel")
class VAEField(BaseModel):
vae: ModelIdentifierField = Field(description="Info to load vae submodel")
seamless_axes: List[str] = Field(default_factory=list, description='Axes ("x" and "y") to which to apply seamless')
@@ -139,78 +122,6 @@ class ModelIdentifierInvocation(BaseInvocation):
return ModelIdentifierOutput(model=self.model)
@invocation_output("flux_model_loader_output")
class FluxModelLoaderOutput(BaseInvocationOutput):
"""Flux base model loader output"""
transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer")
clip: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP")
t5_encoder: T5EncoderField = OutputField(description=FieldDescriptions.t5_encoder, title="T5 Encoder")
vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")
max_seq_len: Literal[256, 512] = OutputField(
description="The max sequence length to used for the T5 encoder. (256 for schnell transformer, 512 for dev transformer)",
title="Max Seq Length",
)
@invocation(
"flux_model_loader",
title="Flux Main Model",
tags=["model", "flux"],
category="model",
version="1.0.4",
classification=Classification.Prototype,
)
class FluxModelLoaderInvocation(BaseInvocation):
"""Loads a flux base model, outputting its submodels."""
model: ModelIdentifierField = InputField(
description=FieldDescriptions.flux_model,
ui_type=UIType.FluxMainModel,
input=Input.Direct,
)
t5_encoder_model: ModelIdentifierField = InputField(
description=FieldDescriptions.t5_encoder, ui_type=UIType.T5EncoderModel, input=Input.Direct, title="T5 Encoder"
)
clip_embed_model: ModelIdentifierField = InputField(
description=FieldDescriptions.clip_embed_model,
ui_type=UIType.CLIPEmbedModel,
input=Input.Direct,
title="CLIP Embed",
)
vae_model: ModelIdentifierField = InputField(
description=FieldDescriptions.vae_model, ui_type=UIType.FluxVAEModel, title="VAE"
)
def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:
for key in [self.model.key, self.t5_encoder_model.key, self.clip_embed_model.key, self.vae_model.key]:
if not context.models.exists(key):
raise ValueError(f"Unknown model: {key}")
transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
vae = self.vae_model.model_copy(update={"submodel_type": SubModelType.VAE})
tokenizer = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
clip_encoder = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.TextEncoder})
tokenizer2 = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
t5_encoder = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})
transformer_config = context.models.get_config(transformer)
assert isinstance(transformer_config, CheckpointConfigBase)
return FluxModelLoaderOutput(
transformer=TransformerField(transformer=transformer, loras=[]),
clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0),
t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder),
vae=VAEField(vae=vae),
max_seq_len=max_seq_lengths[transformer_config.config_path],
)
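
The invoke() above leans on pydantic's model_copy(update=...) to derive one identifier per submodel from a shared base identifier; a minimal sketch with a simplified field set:

from pydantic import BaseModel

class ModelRef(BaseModel):
    key: str
    submodel_type: str | None = None

base = ModelRef(key="flux-dev")
transformer = base.model_copy(update={"submodel_type": "transformer"})
vae = base.model_copy(update={"submodel_type": "vae"})
assert base.submodel_type is None  # copies are independent; the base is unchanged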
@invocation(
"main_model_loader",
title="Main Model",

View File

@@ -1,31 +0,0 @@
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.normal_bae import NormalMapDetector
from invokeai.backend.image_util.normal_bae.nets.NNET import NNET
@invocation(
"normal_map",
title="Normal Map",
tags=["controlnet", "normal"],
category="controlnet",
version="1.0.0",
)
class NormalMapInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Generates a normal map."""
image: ImageField = InputField(description="The image to process")
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGB")
loaded_model = context.models.load_remote_model(NormalMapDetector.get_model_url(), NormalMapDetector.load_model)
with loaded_model as model:
assert isinstance(model, NNET)
detector = NormalMapDetector(model)
normal_map = detector.run(image=image)
image_dto = context.images.save(image=normal_map)
return ImageOutput.build(image_dto)

View File

@@ -1,33 +0,0 @@
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.pidi import PIDINetDetector
from invokeai.backend.image_util.pidi.model import PiDiNet
@invocation(
"pidi_edge_detection",
title="PiDiNet Edge Detection",
tags=["controlnet", "edge"],
category="controlnet",
version="1.0.0",
)
class PiDiNetEdgeDetectionInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Generates an edge map using PiDiNet."""
image: ImageField = InputField(description="The image to process")
quantize_edges: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGB")
loaded_model = context.models.load_remote_model(PIDINetDetector.get_model_url(), PIDINetDetector.load_model)
with loaded_model as model:
assert isinstance(model, PiDiNet)
detector = PIDINetDetector(model)
edge_map = detector.run(image=image, quantize_edges=self.quantize_edges, scribble=self.scribble)
image_dto = context.images.save(image=edge_map)
return ImageOutput.build(image_dto)

View File

@@ -12,7 +12,6 @@ from invokeai.app.invocations.fields import (
ConditioningField,
DenoiseMaskField,
FieldDescriptions,
FluxConditioningField,
ImageField,
Input,
InputField,
@@ -415,17 +414,6 @@ class MaskOutput(BaseInvocationOutput):
height: int = OutputField(description="The height of the mask in pixels.")
@invocation_output("flux_conditioning_output")
class FluxConditioningOutput(BaseInvocationOutput):
"""Base class for nodes that output a single conditioning tensor"""
conditioning: FluxConditioningField = OutputField(description=FieldDescriptions.cond)
@classmethod
def build(cls, conditioning_name: str) -> "FluxConditioningOutput":
return cls(conditioning=FluxConditioningField(conditioning_name=conditioning_name))
@invocation_output("conditioning_output")
class ConditioningOutput(BaseInvocationOutput):
"""Base class for nodes that output a single conditioning tensor"""

View File

@@ -22,8 +22,8 @@ from invokeai.app.invocations.fields import (
from invokeai.app.invocations.model import UNetField
from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoRAPatcher
from invokeai.backend.lora import LoRAModelRaw
from invokeai.backend.model_patcher import ModelPatcher
from invokeai.backend.stable_diffusion.diffusers_pipeline import ControlNetData, PipelineIntermediateState
from invokeai.backend.stable_diffusion.multi_diffusion_pipeline import (
MultiDiffusionPipeline,
@@ -204,11 +204,7 @@ class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
# Load the UNet model.
unet_info = context.models.load(self.unet.unet)
with (
ExitStack() as exit_stack,
unet_info as unet,
LoRAPatcher.apply_lora_patches(model=unet, patches=_lora_loader(), prefix="lora_unet_"),
):
with ExitStack() as exit_stack, unet_info as unet, ModelPatcher.apply_lora_unet(unet, _lora_loader()):
assert isinstance(unet, UNet2DConditionModel)
latents = latents.to(device=unet.device, dtype=unet.dtype)
if noise is not None:

View File

@@ -146,11 +146,7 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
self._lock.acquire()
self._cursor.execute(
"""--sql
SELECT COUNT(*)
FROM board_images
INNER JOIN images ON board_images.image_name = images.image_name
WHERE images.is_intermediate = FALSE
AND board_images.board_id = ?;
SELECT COUNT(*) FROM board_images WHERE board_id = ?;
""",
(board_id,),
)
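
A minimal sqlite3 illustration of what the two count queries disagree on: the joined version excludes intermediate images, while the plain version counts every row in board_images.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE images (image_name TEXT PRIMARY KEY, is_intermediate BOOLEAN);
    CREATE TABLE board_images (board_id TEXT, image_name TEXT);
    INSERT INTO images VALUES ('a.png', FALSE), ('b.png', TRUE);
    INSERT INTO board_images VALUES ('b1', 'a.png'), ('b1', 'b.png');
""")
print(conn.execute("SELECT COUNT(*) FROM board_images WHERE board_id = ?", ("b1",)).fetchone()[0])  # 2
print(conn.execute(
    "SELECT COUNT(*) FROM board_images "
    "INNER JOIN images ON board_images.image_name = images.image_name "
    "WHERE images.is_intermediate = FALSE AND board_images.board_id = ?", ("b1",)
).fetchone()[0])  # 1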

View File

@@ -88,8 +88,7 @@ class QueueItemEventBase(QueueEventBase):
item_id: int = Field(description="The ID of the queue item")
batch_id: str = Field(description="The ID of the queue batch")
origin: str | None = Field(default=None, description="The origin of the queue item")
destination: str | None = Field(default=None, description="The destination of the queue item")
origin: str | None = Field(default=None, description="The origin of the batch")
class InvocationEventBase(QueueItemEventBase):
@@ -115,7 +114,6 @@ class InvocationStartedEvent(InvocationEventBase):
item_id=queue_item.item_id,
batch_id=queue_item.batch_id,
origin=queue_item.origin,
destination=queue_item.destination,
session_id=queue_item.session_id,
invocation=invocation,
invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
@@ -150,7 +148,6 @@ class InvocationDenoiseProgressEvent(InvocationEventBase):
item_id=queue_item.item_id,
batch_id=queue_item.batch_id,
origin=queue_item.origin,
destination=queue_item.destination,
session_id=queue_item.session_id,
invocation=invocation,
invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
@@ -189,7 +186,6 @@ class InvocationCompleteEvent(InvocationEventBase):
item_id=queue_item.item_id,
batch_id=queue_item.batch_id,
origin=queue_item.origin,
destination=queue_item.destination,
session_id=queue_item.session_id,
invocation=invocation,
invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
@@ -223,7 +219,6 @@ class InvocationErrorEvent(InvocationEventBase):
item_id=queue_item.item_id,
batch_id=queue_item.batch_id,
origin=queue_item.origin,
destination=queue_item.destination,
session_id=queue_item.session_id,
invocation=invocation,
invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
@@ -262,7 +257,6 @@ class QueueItemStatusChangedEvent(QueueItemEventBase):
item_id=queue_item.item_id,
batch_id=queue_item.batch_id,
origin=queue_item.origin,
destination=queue_item.destination,
session_id=queue_item.session_id,
status=queue_item.status,
error_type=queue_item.error_type,

View File

@@ -407,7 +407,6 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
FROM images
JOIN board_images ON images.image_name = board_images.image_name
WHERE board_images.board_id = ?
AND images.is_intermediate = FALSE
ORDER BY images.starred DESC, images.created_at DESC
LIMIT 1;
""",

View File

@@ -254,7 +254,7 @@ class ModelInstallServiceBase(ABC):
is periodically cleared of infrequently-used entries when the model
converter runs.
Note that this doesn't automatically install or register the model, but is
Note that this doesn't automatically install or register the model, but is
intended for use by nodes that need access to models that aren't directly
supported by InvokeAI. The downloading process takes advantage of the download queue
to avoid interrupting other operations.

View File

@@ -103,7 +103,7 @@ class HFModelSource(StringLikeSource):
if self.variant:
base += f":{self.variant or ''}"
if self.subfolder:
base += f"::{self.subfolder.as_posix()}"
base += f":{self.subfolder}"
return base

View File

@@ -783,9 +783,8 @@ class ModelInstallService(ModelInstallServiceBase):
# So what we do is to synthesize a folder named "sdxl-turbo_vae" here.
if subfolder:
top = Path(remote_files[0].path.parts[0]) # e.g. "sdxl-turbo/"
path_to_remove = top / subfolder # sdxl-turbo/vae/
subfolder_rename = subfolder.name.replace("/", "_").replace("\\", "_")
path_to_add = Path(f"{top}_{subfolder_rename}")
path_to_remove = top / subfolder.parts[-1] # sdxl-turbo/vae/
path_to_add = Path(f"{top}_{subfolder}")
else:
path_to_remove = Path(".")
path_to_add = Path(".")
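
A quick pathlib check of the rename logic above, using the sdxl-turbo/vae example from the comment; the concrete paths are illustrative.

from pathlib import Path

remote_file = Path("sdxl-turbo/vae/config.json")
subfolder = Path("vae")
top = Path(remote_file.parts[0])            # sdxl-turbo
path_to_remove = top / subfolder.parts[-1]  # sdxl-turbo/vae
path_to_add = Path(f"{top}_{subfolder}")    # sdxl-turbo_vae
print(path_to_remove, path_to_add)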

View File

@@ -77,7 +77,6 @@ class ModelRecordChanges(BaseModelExcludeNull):
type: Optional[ModelType] = Field(description="Type of model", default=None)
key: Optional[str] = Field(description="Database ID for this model", default=None)
hash: Optional[str] = Field(description="hash of model file", default=None)
format: Optional[str] = Field(description="format of model file", default=None)
trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
default_settings: Optional[MainModelDefaultSettings | ControlAdapterDefaultSettings] = Field(
description="Default settings for this model", default=None

View File

@@ -6,14 +6,13 @@ from invokeai.app.services.session_queue.session_queue_common import (
Batch,
BatchStatus,
CancelByBatchIDsResult,
CancelByDestinationResult,
CancelByOriginResult,
CancelByQueueIDResult,
ClearResult,
EnqueueBatchResult,
IsEmptyResult,
IsFullResult,
PruneResult,
SessionQueueCountsByDestination,
SessionQueueItem,
SessionQueueItemDTO,
SessionQueueStatus,
@@ -70,11 +69,6 @@ class SessionQueueBase(ABC):
"""Gets the status of the queue"""
pass
@abstractmethod
def get_counts_by_destination(self, queue_id: str, destination: str) -> SessionQueueCountsByDestination:
"""Gets the counts of queue items by destination"""
pass
@abstractmethod
def get_batch_status(self, queue_id: str, batch_id: str) -> BatchStatus:
"""Gets the status of a batch"""
@@ -103,8 +97,8 @@ class SessionQueueBase(ABC):
pass
@abstractmethod
def cancel_by_destination(self, queue_id: str, destination: str) -> CancelByDestinationResult:
"""Cancels all queue items with the given batch destination"""
def cancel_by_origin(self, queue_id: str, origin: str) -> CancelByOriginResult:
"""Cancels all queue items with the given batch origin"""
pass
@abstractmethod

View File

@@ -77,14 +77,7 @@ BatchDataCollection: TypeAlias = list[list[BatchDatum]]
class Batch(BaseModel):
batch_id: str = Field(default_factory=uuid_string, description="The ID of the batch")
origin: str | None = Field(
default=None,
description="The origin of this queue item. This data is used by the frontend to determine how to handle results.",
)
destination: str | None = Field(
default=None,
description="The origin of this queue item. This data is used by the frontend to determine how to handle results",
)
origin: str | None = Field(default=None, description="The origin of this batch.")
data: Optional[BatchDataCollection] = Field(default=None, description="The batch data collection.")
graph: Graph = Field(description="The graph to initialize the session with")
workflow: Optional[WorkflowWithoutID] = Field(
@@ -203,14 +196,7 @@ class SessionQueueItemWithoutGraph(BaseModel):
status: QUEUE_ITEM_STATUS = Field(default="pending", description="The status of this queue item")
priority: int = Field(default=0, description="The priority of this queue item")
batch_id: str = Field(description="The ID of the batch associated with this queue item")
origin: str | None = Field(
default=None,
description="The origin of this queue item. This data is used by the frontend to determine how to handle results.",
)
destination: str | None = Field(
default=None,
description="The origin of this queue item. This data is used by the frontend to determine how to handle results",
)
origin: str | None = Field(default=None, description="The origin of this queue item. ")
session_id: str = Field(
description="The ID of the session associated with this queue item. The session doesn't exist in graph_executions until the queue item is executed."
)
@@ -307,22 +293,10 @@ class SessionQueueStatus(BaseModel):
total: int = Field(..., description="Total number of queue items")
class SessionQueueCountsByDestination(BaseModel):
queue_id: str = Field(..., description="The ID of the queue")
destination: str = Field(..., description="The destination of queue items included in this status")
pending: int = Field(..., description="Number of queue items with status 'pending' for the destination")
in_progress: int = Field(..., description="Number of queue items with status 'in_progress' for the destination")
completed: int = Field(..., description="Number of queue items with status 'complete' for the destination")
failed: int = Field(..., description="Number of queue items with status 'error' for the destination")
canceled: int = Field(..., description="Number of queue items with status 'canceled' for the destination")
total: int = Field(..., description="Total number of queue items for the destination")
class BatchStatus(BaseModel):
queue_id: str = Field(..., description="The ID of the queue")
batch_id: str = Field(..., description="The ID of the batch")
origin: str | None = Field(..., description="The origin of the batch")
destination: str | None = Field(..., description="The destination of the batch")
pending: int = Field(..., description="Number of queue items with status 'pending'")
in_progress: int = Field(..., description="Number of queue items with status 'in_progress'")
completed: int = Field(..., description="Number of queue items with status 'complete'")
@@ -357,10 +331,10 @@ class CancelByBatchIDsResult(BaseModel):
canceled: int = Field(..., description="Number of queue items canceled")
class CancelByDestinationResult(CancelByBatchIDsResult):
"""Result of canceling by a destination"""
class CancelByOriginResult(BaseModel):
"""Result of canceling by list of batch ids"""
pass
canceled: int = Field(..., description="Number of queue items canceled")
class CancelByQueueIDResult(CancelByBatchIDsResult):
@@ -469,7 +443,6 @@ class SessionQueueValueToInsert(NamedTuple):
priority: int # priority
workflow: Optional[str] # workflow json
origin: str | None
destination: str | None
ValuesToInsert: TypeAlias = list[SessionQueueValueToInsert]
@@ -491,7 +464,6 @@ def prepare_values_to_insert(queue_id: str, batch: Batch, priority: int, max_new
priority, # priority
json.dumps(workflow, default=to_jsonable_python) if workflow else None, # workflow (json)
batch.origin, # origin
batch.destination, # destination
)
)
return values_to_insert

View File

@@ -10,14 +10,13 @@ from invokeai.app.services.session_queue.session_queue_common import (
Batch,
BatchStatus,
CancelByBatchIDsResult,
CancelByDestinationResult,
CancelByOriginResult,
CancelByQueueIDResult,
ClearResult,
EnqueueBatchResult,
IsEmptyResult,
IsFullResult,
PruneResult,
SessionQueueCountsByDestination,
SessionQueueItem,
SessionQueueItemDTO,
SessionQueueItemNotFoundError,
@@ -129,8 +128,8 @@ class SqliteSessionQueue(SessionQueueBase):
self.__cursor.executemany(
"""--sql
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
""",
values_to_insert,
)
@@ -427,19 +426,19 @@ class SqliteSessionQueue(SessionQueueBase):
self.__lock.release()
return CancelByBatchIDsResult(canceled=count)
def cancel_by_destination(self, queue_id: str, destination: str) -> CancelByDestinationResult:
def cancel_by_origin(self, queue_id: str, origin: str) -> CancelByOriginResult:
try:
current_queue_item = self.get_current(queue_id)
self.__lock.acquire()
where = """--sql
WHERE
queue_id == ?
AND destination == ?
AND origin == ?
AND status != 'canceled'
AND status != 'completed'
AND status != 'failed'
"""
params = (queue_id, destination)
params = (queue_id, origin)
self.__cursor.execute(
f"""--sql
SELECT COUNT(*)
@@ -458,14 +457,14 @@ class SqliteSessionQueue(SessionQueueBase):
params,
)
self.__conn.commit()
if current_queue_item is not None and current_queue_item.destination == destination:
if current_queue_item is not None and current_queue_item.origin == origin:
self._set_queue_item_status(current_queue_item.item_id, "canceled")
except Exception:
self.__conn.rollback()
raise
finally:
self.__lock.release()
return CancelByDestinationResult(canceled=count)
return CancelByOriginResult(canceled=count)
def cancel_by_queue_id(self, queue_id: str) -> CancelByQueueIDResult:
try:
@@ -580,8 +579,7 @@ class SqliteSessionQueue(SessionQueueBase):
session_id,
batch_id,
queue_id,
origin,
destination
origin
FROM session_queue
WHERE queue_id = ?
"""
@@ -661,7 +659,7 @@ class SqliteSessionQueue(SessionQueueBase):
self.__lock.acquire()
self.__cursor.execute(
"""--sql
SELECT status, count(*), origin, destination
SELECT status, count(*), origin
FROM session_queue
WHERE
queue_id = ?
@@ -674,7 +672,6 @@ class SqliteSessionQueue(SessionQueueBase):
total = sum(row[1] for row in result)
counts: dict[str, int] = {row[0]: row[1] for row in result}
origin = result[0]["origin"] if result else None
destination = result[0]["destination"] if result else None
except Exception:
self.__conn.rollback()
raise
@@ -684,7 +681,6 @@ class SqliteSessionQueue(SessionQueueBase):
return BatchStatus(
batch_id=batch_id,
origin=origin,
destination=destination,
queue_id=queue_id,
pending=counts.get("pending", 0),
in_progress=counts.get("in_progress", 0),
@@ -693,37 +689,3 @@ class SqliteSessionQueue(SessionQueueBase):
canceled=counts.get("canceled", 0),
total=total,
)
def get_counts_by_destination(self, queue_id: str, destination: str) -> SessionQueueCountsByDestination:
try:
self.__lock.acquire()
self.__cursor.execute(
"""--sql
SELECT status, count(*)
FROM session_queue
WHERE queue_id = ?
AND destination = ?
GROUP BY status
""",
(queue_id, destination),
)
counts_result = cast(list[sqlite3.Row], self.__cursor.fetchall())
except Exception:
self.__conn.rollback()
raise
finally:
self.__lock.release()
total = sum(row[1] for row in counts_result)
counts: dict[str, int] = {row[0]: row[1] for row in counts_result}
return SessionQueueCountsByDestination(
queue_id=queue_id,
destination=destination,
pending=counts.get("pending", 0),
in_progress=counts.get("in_progress", 0),
completed=counts.get("completed", 0),
failed=counts.get("failed", 0),
canceled=counts.get("canceled", 0),
total=total,
)

View File

@@ -14,7 +14,7 @@ from invokeai.app.services.image_records.image_records_common import ImageCatego
from invokeai.app.services.images.images_common import ImageDTO
from invokeai.app.services.invocation_services import InvocationServices
from invokeai.app.services.model_records.model_records_base import UnknownModelException
from invokeai.app.util.step_callback import flux_step_callback, stable_diffusion_step_callback
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.model_manager.config import (
AnyModel,
AnyModelConfig,
@@ -557,24 +557,6 @@ class UtilInterface(InvocationContextInterface):
is_canceled=self.is_canceled,
)
def flux_step_callback(self, intermediate_state: PipelineIntermediateState) -> None:
"""
The step callback emits a progress event with the current step, the total number of
steps, a preview image, and some other internal metadata.
This should be called after each denoising step.
Args:
intermediate_state: The intermediate state of the diffusion pipeline.
"""
flux_step_callback(
context_data=self._data,
intermediate_state=intermediate_state,
events=self._services.events,
is_canceled=self.is_canceled,
)
class InvocationContext:
"""Provides access to various services and data for the current invocation.

View File

@@ -10,11 +10,9 @@ class Migration15Callback:
def _add_origin_col(self, cursor: sqlite3.Cursor) -> None:
"""
- Adds `origin` column to the session queue table.
- Adds `destination` column to the session queue table.
"""
cursor.execute("ALTER TABLE session_queue ADD COLUMN origin TEXT;")
cursor.execute("ALTER TABLE session_queue ADD COLUMN destination TEXT;")
def build_migration_15() -> Migration:
@@ -23,7 +21,6 @@ def build_migration_15() -> Migration:
This migration does the following:
- Adds `origin` column to the session queue table.
- Adds `destination` column to the session queue table.
"""
migration_15 = Migration(
from_version=14,

View File

@@ -1,420 +0,0 @@
{
"name": "FLUX Image to Image",
"author": "InvokeAI",
"description": "A simple image-to-image workflow using a FLUX dev model. ",
"version": "1.1.0",
"contact": "",
"tags": "image2image, flux, image-to-image",
"notes": "Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend using FLUX dev models for image-to-image workflows. The image-to-image performance with FLUX schnell models is poor.",
"exposedFields": [
{
"nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"fieldName": "model"
},
{
"nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"fieldName": "t5_encoder_model"
},
{
"nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"fieldName": "clip_embed_model"
},
{
"nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"fieldName": "vae_model"
},
{
"nodeId": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"fieldName": "prompt"
},
{
"nodeId": "2981a67c-480f-4237-9384-26b68dbf912b",
"fieldName": "image"
}
],
"meta": {
"version": "3.0.0",
"category": "default"
},
"nodes": [
{
"id": "eebd7252-0bd8-401a-bb26-2b8bc64892fa",
"type": "invocation",
"data": {
"id": "eebd7252-0bd8-401a-bb26-2b8bc64892fa",
"type": "flux_denoise",
"version": "2.1.0",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": true,
"useCache": true,
"nodePack": "invokeai",
"inputs": {
"board": {
"name": "board",
"label": ""
},
"metadata": {
"name": "metadata",
"label": ""
},
"latents": {
"name": "latents",
"label": ""
},
"denoise_mask": {
"name": "denoise_mask",
"label": ""
},
"denoising_start": {
"name": "denoising_start",
"label": "",
"value": 0.04
},
"denoising_end": {
"name": "denoising_end",
"label": "",
"value": 1
},
"trajectory_guidance_strength": {
"name": "trajectory_guidance_strength",
"label": "",
"value": 0.0
},
"transformer": {
"name": "transformer",
"label": ""
},
"positive_text_conditioning": {
"name": "positive_text_conditioning",
"label": ""
},
"width": {
"name": "width",
"label": "",
"value": 1024
},
"height": {
"name": "height",
"label": "",
"value": 1024
},
"num_steps": {
"name": "num_steps",
"label": "",
"value": 30
},
"guidance": {
"name": "guidance",
"label": "",
"value": 4
},
"seed": {
"name": "seed",
"label": "",
"value": 0
}
}
},
"position": {
"x": 1159.584057771928,
"y": -175.90561201366845
}
},
{
"id": "2981a67c-480f-4237-9384-26b68dbf912b",
"type": "invocation",
"data": {
"id": "2981a67c-480f-4237-9384-26b68dbf912b",
"type": "flux_vae_encode",
"version": "1.0.0",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": true,
"useCache": true,
"inputs": {
"image": {
"name": "image",
"label": ""
},
"vae": {
"name": "vae",
"label": ""
}
}
},
"position": {
"x": 732.7680166609682,
"y": -24.37398171806909
}
},
{
"id": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
"type": "invocation",
"data": {
"id": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
"type": "flux_vae_decode",
"version": "1.0.0",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": false,
"useCache": true,
"inputs": {
"board": {
"name": "board",
"label": ""
},
"metadata": {
"name": "metadata",
"label": ""
},
"latents": {
"name": "latents",
"label": ""
},
"vae": {
"name": "vae",
"label": ""
}
}
},
"position": {
"x": 1575.5797431839133,
"y": -209.00150975507415
}
},
{
"id": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"type": "invocation",
"data": {
"id": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"type": "flux_model_loader",
"version": "1.0.4",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": true,
"useCache": false,
"inputs": {
"model": {
"name": "model",
"label": "Model (dev variant recommended for Image-to-Image)",
"value": {
"key": "b4990a6c-0899-48e9-969b-d6f3801acc6a",
"hash": "random:aad8f7bc19ce76541dfb394b62a30f77722542b66e48064a9f25453263b45fba",
"name": "FLUX Dev (Quantized)_2",
"base": "flux",
"type": "main"
}
},
"t5_encoder_model": {
"name": "t5_encoder_model",
"label": "",
"value": {
"key": "d18d5575-96b6-4da3-b3d8-eb58308d6705",
"hash": "random:f2f9ed74acdfb4bf6fec200e780f6c25f8dd8764a35e65d425d606912fdf573a",
"name": "t5_bnb_int8_quantized_encoder",
"base": "any",
"type": "t5_encoder"
}
},
"clip_embed_model": {
"name": "clip_embed_model",
"label": "",
"value": {
"key": "5a19d7e5-8d98-43cd-8a81-87515e4b3b4e",
"hash": "random:4bd08514c08fb6ff04088db9aeb45def3c488e8b5fd09a35f2cc4f2dc346f99f",
"name": "clip-vit-large-patch14",
"base": "any",
"type": "clip_embed"
}
},
"vae_model": {
"name": "vae_model",
"label": "",
"value": {
"key": "9172beab-5c1d-43f0-b2f0-6e0b956710d9",
"hash": "random:c54dde288e5fa2e6137f1c92e9d611f598049e6f16e360207b6d96c9f5a67ba0",
"name": "FLUX.1-schnell_ae",
"base": "flux",
"type": "vae"
}
}
}
},
"position": {
"x": 328.1809894659957,
"y": -90.2241133566946
}
},
{
"id": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"type": "invocation",
"data": {
"id": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"type": "flux_text_encoder",
"version": "1.0.0",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": true,
"useCache": true,
"inputs": {
"clip": {
"name": "clip",
"label": ""
},
"t5_encoder": {
"name": "t5_encoder",
"label": ""
},
"t5_max_seq_len": {
"name": "t5_max_seq_len",
"label": "T5 Max Seq Len",
"value": 256
},
"prompt": {
"name": "prompt",
"label": "",
"value": "a cat wearing a birthday hat"
}
}
},
"position": {
"x": 745.8823365057267,
"y": -299.60249175851914
}
},
{
"id": "4754c534-a5f3-4ad0-9382-7887985e668c",
"type": "invocation",
"data": {
"id": "4754c534-a5f3-4ad0-9382-7887985e668c",
"type": "rand_int",
"version": "1.0.1",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": true,
"useCache": false,
"inputs": {
"low": {
"name": "low",
"label": "",
"value": 0
},
"high": {
"name": "high",
"label": "",
"value": 2147483647
}
}
},
"position": {
"x": 725.834098928012,
"y": 496.2710031089931
}
}
],
"edges": [
{
"id": "reactflow__edge-eebd7252-0bd8-401a-bb26-2b8bc64892falatents-7e5172eb-48c1-44db-a770-8fd83e1435d1latents",
"type": "default",
"source": "eebd7252-0bd8-401a-bb26-2b8bc64892fa",
"target": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
"sourceHandle": "latents",
"targetHandle": "latents"
},
{
"id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90transformer-eebd7252-0bd8-401a-bb26-2b8bc64892fatransformer",
"type": "default",
"source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"target": "eebd7252-0bd8-401a-bb26-2b8bc64892fa",
"sourceHandle": "transformer",
"targetHandle": "transformer"
},
{
"id": "reactflow__edge-01f674f8-b3d1-4df1-acac-6cb8e0bfb63cconditioning-eebd7252-0bd8-401a-bb26-2b8bc64892fapositive_text_conditioning",
"type": "default",
"source": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"target": "eebd7252-0bd8-401a-bb26-2b8bc64892fa",
"sourceHandle": "conditioning",
"targetHandle": "positive_text_conditioning"
},
{
"id": "reactflow__edge-2981a67c-480f-4237-9384-26b68dbf912blatents-eebd7252-0bd8-401a-bb26-2b8bc64892falatents",
"type": "default",
"source": "2981a67c-480f-4237-9384-26b68dbf912b",
"target": "eebd7252-0bd8-401a-bb26-2b8bc64892fa",
"sourceHandle": "latents",
"targetHandle": "latents"
},
{
"id": "reactflow__edge-2981a67c-480f-4237-9384-26b68dbf912bwidth-eebd7252-0bd8-401a-bb26-2b8bc64892fawidth",
"type": "default",
"source": "2981a67c-480f-4237-9384-26b68dbf912b",
"target": "eebd7252-0bd8-401a-bb26-2b8bc64892fa",
"sourceHandle": "width",
"targetHandle": "width"
},
{
"id": "reactflow__edge-2981a67c-480f-4237-9384-26b68dbf912bheight-eebd7252-0bd8-401a-bb26-2b8bc64892faheight",
"type": "default",
"source": "2981a67c-480f-4237-9384-26b68dbf912b",
"target": "eebd7252-0bd8-401a-bb26-2b8bc64892fa",
"sourceHandle": "height",
"targetHandle": "height"
},
{
"id": "reactflow__edge-4754c534-a5f3-4ad0-9382-7887985e668cvalue-eebd7252-0bd8-401a-bb26-2b8bc64892faseed",
"type": "default",
"source": "4754c534-a5f3-4ad0-9382-7887985e668c",
"target": "eebd7252-0bd8-401a-bb26-2b8bc64892fa",
"sourceHandle": "value",
"targetHandle": "seed"
},
{
"id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90vae-2981a67c-480f-4237-9384-26b68dbf912bvae",
"type": "default",
"source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"target": "2981a67c-480f-4237-9384-26b68dbf912b",
"sourceHandle": "vae",
"targetHandle": "vae"
},
{
"id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90vae-7e5172eb-48c1-44db-a770-8fd83e1435d1vae",
"type": "default",
"source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"target": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
"sourceHandle": "vae",
"targetHandle": "vae"
},
{
"id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90max_seq_len-01f674f8-b3d1-4df1-acac-6cb8e0bfb63ct5_max_seq_len",
"type": "default",
"source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"sourceHandle": "max_seq_len",
"targetHandle": "t5_max_seq_len"
},
{
"id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90t5_encoder-01f674f8-b3d1-4df1-acac-6cb8e0bfb63ct5_encoder",
"type": "default",
"source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"sourceHandle": "t5_encoder",
"targetHandle": "t5_encoder"
},
{
"id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90clip-01f674f8-b3d1-4df1-acac-6cb8e0bfb63cclip",
"type": "default",
"source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"sourceHandle": "clip",
"targetHandle": "clip"
}
]
}

View File

@@ -1,356 +0,0 @@
{
"name": "FLUX Text to Image",
"author": "InvokeAI",
"description": "A simple text-to-image workflow using FLUX dev or schnell models.",
"version": "1.1.0",
"contact": "",
"tags": "text2image, flux",
"notes": "Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend 4 steps for FLUX schnell models and 30 steps for FLUX dev models.",
"exposedFields": [
{
"nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"fieldName": "model"
},
{
"nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"fieldName": "t5_encoder_model"
},
{
"nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"fieldName": "clip_embed_model"
},
{
"nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"fieldName": "vae_model"
},
{
"nodeId": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"fieldName": "prompt"
}
],
"meta": {
"version": "3.0.0",
"category": "default"
},
"nodes": [
{
"id": "4ecda92d-ee0e-45ca-aa35-6e9410ac39b9",
"type": "invocation",
"data": {
"id": "4ecda92d-ee0e-45ca-aa35-6e9410ac39b9",
"type": "flux_denoise",
"version": "2.1.0",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": true,
"useCache": true,
"nodePack": "invokeai",
"inputs": {
"board": {
"name": "board",
"label": ""
},
"metadata": {
"name": "metadata",
"label": ""
},
"latents": {
"name": "latents",
"label": ""
},
"denoise_mask": {
"name": "denoise_mask",
"label": ""
},
"denoising_start": {
"name": "denoising_start",
"label": "",
"value": 0
},
"denoising_end": {
"name": "denoising_end",
"label": "",
"value": 1
},
"trajectory_guidance_strength": {
"name": "trajectory_guidance_strength",
"label": "",
"value": 0
},
"transformer": {
"name": "transformer",
"label": ""
},
"positive_text_conditioning": {
"name": "positive_text_conditioning",
"label": ""
},
"width": {
"name": "width",
"label": "",
"value": 1024
},
"height": {
"name": "height",
"label": "",
"value": 1024
},
"num_steps": {
"name": "num_steps",
"label": "",
"value": 4
},
"guidance": {
"name": "guidance",
"label": "",
"value": 4
},
"seed": {
"name": "seed",
"label": "",
"value": 0
}
}
},
"position": {
"x": 1161.0101524413685,
"y": -223.33548695623742
}
},
{
"id": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
"type": "invocation",
"data": {
"id": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
"type": "flux_vae_decode",
"version": "1.0.0",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": false,
"useCache": true,
"inputs": {
"board": {
"name": "board",
"label": ""
},
"metadata": {
"name": "metadata",
"label": ""
},
"latents": {
"name": "latents",
"label": ""
},
"vae": {
"name": "vae",
"label": ""
}
}
},
"position": {
"x": 1575.5797431839133,
"y": -209.00150975507415
}
},
{
"id": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"type": "invocation",
"data": {
"id": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"type": "flux_model_loader",
"version": "1.0.4",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": true,
"useCache": false,
"inputs": {
"model": {
"name": "model",
"label": "",
"value": {
"key": "b4990a6c-0899-48e9-969b-d6f3801acc6a",
"hash": "random:aad8f7bc19ce76541dfb394b62a30f77722542b66e48064a9f25453263b45fba",
"name": "FLUX Dev (Quantized)_2",
"base": "flux",
"type": "main"
}
},
"t5_encoder_model": {
"name": "t5_encoder_model",
"label": "",
"value": {
"key": "d18d5575-96b6-4da3-b3d8-eb58308d6705",
"hash": "random:f2f9ed74acdfb4bf6fec200e780f6c25f8dd8764a35e65d425d606912fdf573a",
"name": "t5_bnb_int8_quantized_encoder",
"base": "any",
"type": "t5_encoder"
}
},
"clip_embed_model": {
"name": "clip_embed_model",
"label": "",
"value": {
"key": "5a19d7e5-8d98-43cd-8a81-87515e4b3b4e",
"hash": "random:4bd08514c08fb6ff04088db9aeb45def3c488e8b5fd09a35f2cc4f2dc346f99f",
"name": "clip-vit-large-patch14",
"base": "any",
"type": "clip_embed"
}
},
"vae_model": {
"name": "vae_model",
"label": "",
"value": {
"key": "9172beab-5c1d-43f0-b2f0-6e0b956710d9",
"hash": "random:c54dde288e5fa2e6137f1c92e9d611f598049e6f16e360207b6d96c9f5a67ba0",
"name": "FLUX.1-schnell_ae",
"base": "flux",
"type": "vae"
}
}
}
},
"position": {
"x": 381.1882713063478,
"y": -95.89663532854017
}
},
{
"id": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"type": "invocation",
"data": {
"id": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"type": "flux_text_encoder",
"version": "1.0.0",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": true,
"useCache": true,
"inputs": {
"clip": {
"name": "clip",
"label": ""
},
"t5_encoder": {
"name": "t5_encoder",
"label": ""
},
"t5_max_seq_len": {
"name": "t5_max_seq_len",
"label": "T5 Max Seq Len",
"value": 256
},
"prompt": {
"name": "prompt",
"label": "",
"value": "a cat"
}
}
},
"position": {
"x": 778.4899149328337,
"y": -100.36469216659502
}
},
{
"id": "4754c534-a5f3-4ad0-9382-7887985e668c",
"type": "invocation",
"data": {
"id": "4754c534-a5f3-4ad0-9382-7887985e668c",
"type": "rand_int",
"version": "1.0.1",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": true,
"useCache": false,
"inputs": {
"low": {
"name": "low",
"label": "",
"value": 0
},
"high": {
"name": "high",
"label": "",
"value": 2147483647
}
}
},
"position": {
"x": 800.9667463219505,
"y": 285.8297267547506
}
}
],
"edges": [
{
"id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90transformer-4ecda92d-ee0e-45ca-aa35-6e9410ac39b9transformer",
"type": "default",
"source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"target": "4ecda92d-ee0e-45ca-aa35-6e9410ac39b9",
"sourceHandle": "transformer",
"targetHandle": "transformer"
},
{
"id": "reactflow__edge-01f674f8-b3d1-4df1-acac-6cb8e0bfb63cconditioning-4ecda92d-ee0e-45ca-aa35-6e9410ac39b9positive_text_conditioning",
"type": "default",
"source": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"target": "4ecda92d-ee0e-45ca-aa35-6e9410ac39b9",
"sourceHandle": "conditioning",
"targetHandle": "positive_text_conditioning"
},
{
"id": "reactflow__edge-4754c534-a5f3-4ad0-9382-7887985e668cvalue-4ecda92d-ee0e-45ca-aa35-6e9410ac39b9seed",
"type": "default",
"source": "4754c534-a5f3-4ad0-9382-7887985e668c",
"target": "4ecda92d-ee0e-45ca-aa35-6e9410ac39b9",
"sourceHandle": "value",
"targetHandle": "seed"
},
{
"id": "reactflow__edge-4ecda92d-ee0e-45ca-aa35-6e9410ac39b9latents-7e5172eb-48c1-44db-a770-8fd83e1435d1latents",
"type": "default",
"source": "4ecda92d-ee0e-45ca-aa35-6e9410ac39b9",
"target": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
"sourceHandle": "latents",
"targetHandle": "latents"
},
{
"id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90vae-7e5172eb-48c1-44db-a770-8fd83e1435d1vae",
"type": "default",
"source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"target": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
"sourceHandle": "vae",
"targetHandle": "vae"
},
{
"id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90max_seq_len-01f674f8-b3d1-4df1-acac-6cb8e0bfb63ct5_max_seq_len",
"type": "default",
"source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"sourceHandle": "max_seq_len",
"targetHandle": "t5_max_seq_len"
},
{
"id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90t5_encoder-01f674f8-b3d1-4df1-acac-6cb8e0bfb63ct5_encoder",
"type": "default",
"source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"sourceHandle": "t5_encoder",
"targetHandle": "t5_encoder"
},
{
"id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90clip-01f674f8-b3d1-4df1-acac-6cb8e0bfb63cclip",
"type": "default",
"source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
"target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"sourceHandle": "clip",
"targetHandle": "clip"
}
]
}

View File

@@ -38,25 +38,6 @@ SD1_5_LATENT_RGB_FACTORS = [
[-0.1307, -0.1874, -0.7445], # L4
]
FLUX_LATENT_RGB_FACTORS = [
[-0.0412, 0.0149, 0.0521],
[0.0056, 0.0291, 0.0768],
[0.0342, -0.0681, -0.0427],
[-0.0258, 0.0092, 0.0463],
[0.0863, 0.0784, 0.0547],
[-0.0017, 0.0402, 0.0158],
[0.0501, 0.1058, 0.1152],
[-0.0209, -0.0218, -0.0329],
[-0.0314, 0.0083, 0.0896],
[0.0851, 0.0665, -0.0472],
[-0.0534, 0.0238, -0.0024],
[0.0452, -0.0026, 0.0048],
[0.0892, 0.0831, 0.0881],
[-0.1117, -0.0304, -0.0789],
[0.0027, -0.0479, -0.0043],
[-0.1146, -0.0827, -0.0598],
]
def sample_to_lowres_estimated_image(
samples: torch.Tensor, latent_rgb_factors: torch.Tensor, smooth_matrix: Optional[torch.Tensor] = None
@@ -113,32 +94,3 @@ def stable_diffusion_step_callback(
intermediate_state,
ProgressImage(dataURL=dataURL, width=width, height=height),
)
def flux_step_callback(
context_data: "InvocationContextData",
intermediate_state: PipelineIntermediateState,
events: "EventServiceBase",
is_canceled: Callable[[], bool],
) -> None:
if is_canceled():
raise CanceledException
sample = intermediate_state.latents
latent_rgb_factors = torch.tensor(FLUX_LATENT_RGB_FACTORS, dtype=sample.dtype, device=sample.device)
latent_image_perm = sample.permute(1, 2, 0).to(dtype=sample.dtype, device=sample.device)
latent_image = latent_image_perm @ latent_rgb_factors
latents_ubyte = (
((latent_image + 1) / 2).clamp(0, 1).mul(0xFF)  # rescale from [-1, 1] to [0, 1], then to [0, 255]
).to(device="cpu", dtype=torch.uint8)
image = Image.fromarray(latents_ubyte.cpu().numpy())
(width, height) = image.size
width *= 8
height *= 8
dataURL = image_to_dataURL(image, image_format="JPEG")
events.emit_invocation_denoise_progress(
context_data.queue_item,
context_data.invocation,
intermediate_state,
ProgressImage(dataURL=dataURL, width=width, height=height),
)
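
The preview math in flux_step_callback is a linear projection from 16 latent channels to RGB followed by byte rescaling; here it is in isolation, with a random factor matrix standing in for FLUX_LATENT_RGB_FACTORS so the snippet runs on its own.

import torch
from PIL import Image

sample = torch.randn(16, 32, 32)                  # (channels, h, w) FLUX latents
factors = torch.randn(16, 3)                      # stand-in for the 16x3 FLUX_LATENT_RGB_FACTORS
latent_image = sample.permute(1, 2, 0) @ factors  # (h, w, 3)
latents_ubyte = ((latent_image + 1) / 2).clamp(0, 1).mul(0xFF).to(torch.uint8)
image = Image.fromarray(latents_ubyte.numpy())
width, height = image.size                        # the callback then reports width * 8, height * 8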

View File

@@ -1,25 +0,0 @@
{
"_name_or_path": "openai/clip-vit-large-patch14",
"architectures": [
"CLIPTextModel"
],
"attention_dropout": 0.0,
"bos_token_id": 0,
"dropout": 0.0,
"eos_token_id": 2,
"hidden_act": "quick_gelu",
"hidden_size": 768,
"initializer_factor": 1.0,
"initializer_range": 0.02,
"intermediate_size": 3072,
"layer_norm_eps": 1e-05,
"max_position_embeddings": 77,
"model_type": "clip_text_model",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 1,
"projection_dim": 768,
"torch_dtype": "bfloat16",
"transformers_version": "4.43.3",
"vocab_size": 49408
}

View File

@@ -1,59 +0,0 @@
from typing import Callable
import torch
from tqdm import tqdm
from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.trajectory_guidance_extension import TrajectoryGuidanceExtension
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
def denoise(
model: Flux,
# model input
img: torch.Tensor,
img_ids: torch.Tensor,
txt: torch.Tensor,
txt_ids: torch.Tensor,
vec: torch.Tensor,
# sampling parameters
timesteps: list[float],
step_callback: Callable[[PipelineIntermediateState], None],
guidance: float,
traj_guidance_extension: TrajectoryGuidanceExtension | None, # noqa: F821
):
step = 0
# guidance_vec is ignored for schnell.
guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
for t_curr, t_prev in tqdm(list(zip(timesteps[:-1], timesteps[1:], strict=True))):
t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)
pred = model(
img=img,
img_ids=img_ids,
txt=txt,
txt_ids=txt_ids,
y=vec,
timesteps=t_vec,
guidance=guidance_vec,
)
if traj_guidance_extension is not None:
pred = traj_guidance_extension.update_noise(
t_curr_latents=img, pred_noise=pred, t_curr=t_curr, t_prev=t_prev
)
preview_img = img - t_curr * pred
img = img + (t_prev - t_curr) * pred
step_callback(
PipelineIntermediateState(
step=step,
order=1,
total_steps=len(timesteps),
timestep=int(t_curr),
latents=preview_img,
),
)
step += 1
return img
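As a rough illustration of the update rule in the loop above (dummy shapes, one step only): each iteration moves the packed latents along the predicted velocity from t_curr to t_prev, while the preview extrapolates all the way to t=0.
# One-step illustration of the Euler update in denoise(); shapes are hypothetical.
import torch
img = torch.randn(1, 1024, 64)    # packed latents: (batch, tokens, channels)
pred = torch.randn_like(img)      # dummy model output for this step
t_curr, t_prev = 1.0, 0.95
preview_img = img - t_curr * pred       # extrapolate to t=0 for the progress preview
img = img + (t_prev - t_curr) * pred    # Euler step from t_curr to t_prev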

View File

@@ -1,35 +0,0 @@
import torch
class InpaintExtension:
"""A class for managing inpainting with FLUX."""
def __init__(self, init_latents: torch.Tensor, inpaint_mask: torch.Tensor, noise: torch.Tensor):
"""Initialize InpaintExtension.
Args:
init_latents (torch.Tensor): The initial latents (i.e. un-noised at timestep 0). In 'packed' format.
inpaint_mask (torch.Tensor): A mask specifying which elements to inpaint. Range [0, 1]. Values of 1 will be
re-generated. Values of 0 will remain unchanged. Values between 0 and 1 can be used to blend the
inpainted region with the background. In 'packed' format.
noise (torch.Tensor): The noise tensor used to noise the init_latents. In 'packed' format.
"""
assert init_latents.shape == inpaint_mask.shape == noise.shape
self._init_latents = init_latents
self._inpaint_mask = inpaint_mask
self._noise = noise
def merge_intermediate_latents_with_init_latents(
self, intermediate_latents: torch.Tensor, timestep: float
) -> torch.Tensor:
"""Merge the intermediate latents with the initial latents for the current timestep using the inpaint mask. I.e.
update the intermediate latents to keep the regions that are not being inpainted on the correct noise
trajectory.
This function should be called after each denoising step.
"""
# Noise the init latents for the current timestep.
noised_init_latents = self._noise * timestep + (1.0 - timestep) * self._init_latents
# Merge the intermediate latents with the noised_init_latents using the inpaint_mask.
return intermediate_latents * self._inpaint_mask + noised_init_latents * (1.0 - self._inpaint_mask)
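A minimal usage sketch for the class above, with hypothetical packed shapes; the merge is called once after each denoising step.
# Hypothetical usage of InpaintExtension.
import torch
init_latents = torch.randn(1, 1024, 64)        # packed latents of the source image
noise = torch.randn_like(init_latents)         # the noise used to noise init_latents
inpaint_mask = torch.ones_like(init_latents)   # 1 = regenerate, 0 = keep
ext = InpaintExtension(init_latents, inpaint_mask, noise)
intermediate = torch.randn_like(init_latents)  # latents after one denoising step
merged = ext.merge_intermediate_latents_with_init_latents(intermediate, timestep=0.95)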

View File

@@ -1,32 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux
import torch
from einops import rearrange
from torch import Tensor
def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor) -> Tensor:
q, k = apply_rope(q, k, pe)
x = torch.nn.functional.scaled_dot_product_attention(q, k, v)
x = rearrange(x, "B H L D -> B L (H D)")
return x
def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
assert dim % 2 == 0
scale = torch.arange(0, dim, 2, dtype=torch.float64, device=pos.device) / dim
omega = 1.0 / (theta**scale)
out = torch.einsum("...n,d->...nd", pos, omega)
out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1)
out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2)
return out.float()
def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor) -> tuple[Tensor, Tensor]:
xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2)
xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2)
xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)
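A quick shape check for the helpers above, with hypothetical dimensions (head_dim=64, 8 heads, 16 tokens); the extra unsqueeze mirrors what EmbedND does before the embeddings reach attention.
# Hypothetical shape walk-through for rope() and apply_rope().
import torch
pos = torch.arange(16, dtype=torch.float64).unsqueeze(0)   # (1, 16) position ids
pe = rope(pos, dim=64, theta=10_000)                       # (1, 16, 32, 2, 2)
pe = pe.unsqueeze(1)                                       # add a head axis, as EmbedND does
q = torch.randn(1, 8, 16, 64)                              # (batch, heads, tokens, head_dim)
k = torch.randn(1, 8, 16, 64)
q_rot, k_rot = apply_rope(q, k, pe)                        # same shapes as q and k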

View File

@@ -1,117 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux
from dataclasses import dataclass
import torch
from torch import Tensor, nn
from invokeai.backend.flux.modules.layers import (
DoubleStreamBlock,
EmbedND,
LastLayer,
MLPEmbedder,
SingleStreamBlock,
timestep_embedding,
)
@dataclass
class FluxParams:
in_channels: int
vec_in_dim: int
context_in_dim: int
hidden_size: int
mlp_ratio: float
num_heads: int
depth: int
depth_single_blocks: int
axes_dim: list[int]
theta: int
qkv_bias: bool
guidance_embed: bool
class Flux(nn.Module):
"""
Transformer model for flow matching on sequences.
"""
def __init__(self, params: FluxParams):
super().__init__()
self.params = params
self.in_channels = params.in_channels
self.out_channels = self.in_channels
if params.hidden_size % params.num_heads != 0:
raise ValueError(f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}")
pe_dim = params.hidden_size // params.num_heads
if sum(params.axes_dim) != pe_dim:
raise ValueError(f"Got {params.axes_dim} but expected positional dim {pe_dim}")
self.hidden_size = params.hidden_size
self.num_heads = params.num_heads
self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim)
self.img_in = nn.Linear(self.in_channels, self.hidden_size, bias=True)
self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size)
self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size)
self.guidance_in = (
MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) if params.guidance_embed else nn.Identity()
)
self.txt_in = nn.Linear(params.context_in_dim, self.hidden_size)
self.double_blocks = nn.ModuleList(
[
DoubleStreamBlock(
self.hidden_size,
self.num_heads,
mlp_ratio=params.mlp_ratio,
qkv_bias=params.qkv_bias,
)
for _ in range(params.depth)
]
)
self.single_blocks = nn.ModuleList(
[
SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio)
for _ in range(params.depth_single_blocks)
]
)
self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels)
def forward(
self,
img: Tensor,
img_ids: Tensor,
txt: Tensor,
txt_ids: Tensor,
timesteps: Tensor,
y: Tensor,
guidance: Tensor | None = None,
) -> Tensor:
if img.ndim != 3 or txt.ndim != 3:
raise ValueError("Input img and txt tensors must have 3 dimensions.")
# running on sequences img
img = self.img_in(img)
vec = self.time_in(timestep_embedding(timesteps, 256))
if self.params.guidance_embed:
if guidance is None:
raise ValueError("Didn't get guidance strength for guidance distilled model.")
vec = vec + self.guidance_in(timestep_embedding(guidance, 256))
vec = vec + self.vector_in(y)
txt = self.txt_in(txt)
ids = torch.cat((txt_ids, img_ids), dim=1)
pe = self.pe_embedder(ids)
for block in self.double_blocks:
img, txt = block(img=img, txt=txt, vec=vec, pe=pe)
img = torch.cat((txt, img), 1)
for block in self.single_blocks:
img = block(img, vec=vec, pe=pe)
img = img[:, txt.shape[1] :, ...]
img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels)
return img

View File

@@ -1,324 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux
from dataclasses import dataclass
import torch
from einops import rearrange
from torch import Tensor, nn
@dataclass
class AutoEncoderParams:
resolution: int
in_channels: int
ch: int
out_ch: int
ch_mult: list[int]
num_res_blocks: int
z_channels: int
scale_factor: float
shift_factor: float
class AttnBlock(nn.Module):
def __init__(self, in_channels: int):
super().__init__()
self.in_channels = in_channels
self.norm = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
self.q = nn.Conv2d(in_channels, in_channels, kernel_size=1)
self.k = nn.Conv2d(in_channels, in_channels, kernel_size=1)
self.v = nn.Conv2d(in_channels, in_channels, kernel_size=1)
self.proj_out = nn.Conv2d(in_channels, in_channels, kernel_size=1)
def attention(self, h_: Tensor) -> Tensor:
h_ = self.norm(h_)
q = self.q(h_)
k = self.k(h_)
v = self.v(h_)
b, c, h, w = q.shape
q = rearrange(q, "b c h w -> b 1 (h w) c").contiguous()
k = rearrange(k, "b c h w -> b 1 (h w) c").contiguous()
v = rearrange(v, "b c h w -> b 1 (h w) c").contiguous()
h_ = nn.functional.scaled_dot_product_attention(q, k, v)
return rearrange(h_, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b)
def forward(self, x: Tensor) -> Tensor:
return x + self.proj_out(self.attention(x))
class ResnetBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: int):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.norm1 = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.norm2 = nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=1e-6, affine=True)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
if self.in_channels != self.out_channels:
self.nin_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, x):
h = x
h = self.norm1(h)
h = torch.nn.functional.silu(h)
h = self.conv1(h)
h = self.norm2(h)
h = torch.nn.functional.silu(h)
h = self.conv2(h)
if self.in_channels != self.out_channels:
x = self.nin_shortcut(x)
return x + h
class Downsample(nn.Module):
def __init__(self, in_channels: int):
super().__init__()
# no asymmetric padding in torch conv, must do it ourselves
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
def forward(self, x: Tensor):
pad = (0, 1, 0, 1)
x = nn.functional.pad(x, pad, mode="constant", value=0)
x = self.conv(x)
return x
class Upsample(nn.Module):
def __init__(self, in_channels: int):
super().__init__()
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
def forward(self, x: Tensor):
x = nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
x = self.conv(x)
return x
class Encoder(nn.Module):
def __init__(
self,
resolution: int,
in_channels: int,
ch: int,
ch_mult: list[int],
num_res_blocks: int,
z_channels: int,
):
super().__init__()
self.ch = ch
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
# downsampling
self.conv_in = nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1)
curr_res = resolution
in_ch_mult = (1,) + tuple(ch_mult)
self.in_ch_mult = in_ch_mult
self.down = nn.ModuleList()
block_in = self.ch
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
block_in = ch * in_ch_mult[i_level]
block_out = ch * ch_mult[i_level]
for _ in range(self.num_res_blocks):
block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
block_in = block_out
down = nn.Module()
down.block = block
down.attn = attn
if i_level != self.num_resolutions - 1:
down.downsample = Downsample(block_in)
curr_res = curr_res // 2
self.down.append(down)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
self.mid.attn_1 = AttnBlock(block_in)
self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)
# end
self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
self.conv_out = nn.Conv2d(block_in, 2 * z_channels, kernel_size=3, stride=1, padding=1)
def forward(self, x: Tensor) -> Tensor:
# downsampling
hs = [self.conv_in(x)]
for i_level in range(self.num_resolutions):
for i_block in range(self.num_res_blocks):
h = self.down[i_level].block[i_block](hs[-1])
if len(self.down[i_level].attn) > 0:
h = self.down[i_level].attn[i_block](h)
hs.append(h)
if i_level != self.num_resolutions - 1:
hs.append(self.down[i_level].downsample(hs[-1]))
# middle
h = hs[-1]
h = self.mid.block_1(h)
h = self.mid.attn_1(h)
h = self.mid.block_2(h)
# end
h = self.norm_out(h)
h = torch.nn.functional.silu(h)
h = self.conv_out(h)
return h
class Decoder(nn.Module):
def __init__(
self,
ch: int,
out_ch: int,
ch_mult: list[int],
num_res_blocks: int,
in_channels: int,
resolution: int,
z_channels: int,
):
super().__init__()
self.ch = ch
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
self.ffactor = 2 ** (self.num_resolutions - 1)
# compute in_ch_mult, block_in and curr_res at lowest res
block_in = ch * ch_mult[self.num_resolutions - 1]
curr_res = resolution // 2 ** (self.num_resolutions - 1)
self.z_shape = (1, z_channels, curr_res, curr_res)
# z to block_in
self.conv_in = nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
self.mid.attn_1 = AttnBlock(block_in)
self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)
# upsampling
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
block_out = ch * ch_mult[i_level]
for _ in range(self.num_res_blocks + 1):
block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
block_in = block_out
up = nn.Module()
up.block = block
up.attn = attn
if i_level != 0:
up.upsample = Upsample(block_in)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order
# end
self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
self.conv_out = nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1)
def forward(self, z: Tensor) -> Tensor:
# z to block_in
h = self.conv_in(z)
# middle
h = self.mid.block_1(h)
h = self.mid.attn_1(h)
h = self.mid.block_2(h)
# upsampling
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks + 1):
h = self.up[i_level].block[i_block](h)
if len(self.up[i_level].attn) > 0:
h = self.up[i_level].attn[i_block](h)
if i_level != 0:
h = self.up[i_level].upsample(h)
# end
h = self.norm_out(h)
h = torch.nn.functional.silu(h)
h = self.conv_out(h)
return h
class DiagonalGaussian(nn.Module):
def __init__(self, chunk_dim: int = 1):
super().__init__()
self.chunk_dim = chunk_dim
def forward(self, z: Tensor, sample: bool = True, generator: torch.Generator | None = None) -> Tensor:
mean, logvar = torch.chunk(z, 2, dim=self.chunk_dim)
if sample:
std = torch.exp(0.5 * logvar)
# Unfortunately, torch.randn_like(...) does not accept a generator argument at the time of writing, so we
# have to use torch.randn(...) instead.
return mean + std * torch.randn(size=mean.size(), generator=generator, dtype=mean.dtype, device=mean.device)
else:
return mean
class AutoEncoder(nn.Module):
def __init__(self, params: AutoEncoderParams):
super().__init__()
self.encoder = Encoder(
resolution=params.resolution,
in_channels=params.in_channels,
ch=params.ch,
ch_mult=params.ch_mult,
num_res_blocks=params.num_res_blocks,
z_channels=params.z_channels,
)
self.decoder = Decoder(
resolution=params.resolution,
in_channels=params.in_channels,
ch=params.ch,
out_ch=params.out_ch,
ch_mult=params.ch_mult,
num_res_blocks=params.num_res_blocks,
z_channels=params.z_channels,
)
self.reg = DiagonalGaussian()
self.scale_factor = params.scale_factor
self.shift_factor = params.shift_factor
def encode(self, x: Tensor, sample: bool = True, generator: torch.Generator | None = None) -> Tensor:
"""Run VAE encoding on input tensor x.
Args:
x (Tensor): Input image tensor. Shape: (batch_size, in_channels, height, width).
sample (bool, optional): If True, sample from the encoded distribution, else, return the distribution mean.
Defaults to True.
generator (torch.Generator | None, optional): Optional random number generator for reproducibility.
Defaults to None.
Returns:
Tensor: Encoded latent tensor. Shape: (batch_size, z_channels, latent_height, latent_width).
"""
z = self.reg(self.encoder(x), sample=sample, generator=generator)
z = self.scale_factor * (z - self.shift_factor)
return z
def decode(self, z: Tensor) -> Tensor:
z = z / self.scale_factor + self.shift_factor
return self.decoder(z)
def forward(self, x: Tensor) -> Tensor:
return self.decode(self.encode(x))
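A round-trip sketch through the autoencoder above, using the "flux" parameter set that appears later in this diff (untrained weights, so the output is illustrative only).
# Hypothetical encode/decode round trip with randomly initialized weights.
import torch
ae = AutoEncoder(
    AutoEncoderParams(
        resolution=256, in_channels=3, ch=128, out_ch=3, ch_mult=[1, 2, 4, 4],
        num_res_blocks=2, z_channels=16, scale_factor=0.3611, shift_factor=0.1159,
    )
)
x = torch.randn(1, 3, 256, 256)
z = ae.encode(x)       # (1, 16, 32, 32): 8x spatial compression into 16 latent channels
x_hat = ae.decode(z)   # (1, 3, 256, 256)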

View File

@@ -1,33 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux
from torch import Tensor, nn
from transformers import PreTrainedModel, PreTrainedTokenizer
class HFEncoder(nn.Module):
def __init__(self, encoder: PreTrainedModel, tokenizer: PreTrainedTokenizer, is_clip: bool, max_length: int):
super().__init__()
self.max_length = max_length
self.is_clip = is_clip
self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"
self.tokenizer = tokenizer
self.hf_module = encoder
self.hf_module = self.hf_module.eval().requires_grad_(False)
def forward(self, text: list[str]) -> Tensor:
batch_encoding = self.tokenizer(
text,
truncation=True,
max_length=self.max_length,
return_length=False,
return_overflowing_tokens=False,
padding="max_length",
return_tensors="pt",
)
outputs = self.hf_module(
input_ids=batch_encoding["input_ids"].to(self.hf_module.device),
attention_mask=None,
output_hidden_states=False,
)
return outputs[self.output_key]
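A minimal construction sketch for the wrapper above, assuming the CLIP text model referenced elsewhere in this diff; for CLIP the pooled output is returned, while a T5 encoder would return the last hidden state.
# Hypothetical HFEncoder setup around a CLIP text encoder.
from transformers import CLIPTextModel, CLIPTokenizer
clip_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
clip_encoder = HFEncoder(clip_model, clip_tokenizer, is_clip=True, max_length=77)
pooled = clip_encoder(["a photo of a cat"])   # (1, 768) pooler_output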

View File

@@ -1,253 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux
import math
from dataclasses import dataclass
import torch
from einops import rearrange
from torch import Tensor, nn
from invokeai.backend.flux.math import attention, rope
class EmbedND(nn.Module):
def __init__(self, dim: int, theta: int, axes_dim: list[int]):
super().__init__()
self.dim = dim
self.theta = theta
self.axes_dim = axes_dim
def forward(self, ids: Tensor) -> Tensor:
n_axes = ids.shape[-1]
emb = torch.cat(
[rope(ids[..., i], self.axes_dim[i], self.theta) for i in range(n_axes)],
dim=-3,
)
return emb.unsqueeze(1)
def timestep_embedding(t: Tensor, dim, max_period=10000, time_factor: float = 1000.0):
"""
Create sinusoidal timestep embeddings.
:param t: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an (N, D) Tensor of positional embeddings.
"""
t = time_factor * t
half = dim // 2
freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(t.device)
args = t[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
if torch.is_floating_point(t):
embedding = embedding.to(t)
return embedding
class MLPEmbedder(nn.Module):
def __init__(self, in_dim: int, hidden_dim: int):
super().__init__()
self.in_layer = nn.Linear(in_dim, hidden_dim, bias=True)
self.silu = nn.SiLU()
self.out_layer = nn.Linear(hidden_dim, hidden_dim, bias=True)
def forward(self, x: Tensor) -> Tensor:
return self.out_layer(self.silu(self.in_layer(x)))
class RMSNorm(torch.nn.Module):
def __init__(self, dim: int):
super().__init__()
self.scale = nn.Parameter(torch.ones(dim))
def forward(self, x: Tensor):
x_dtype = x.dtype
x = x.float()
rrms = torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + 1e-6)
return (x * rrms).to(dtype=x_dtype) * self.scale
class QKNorm(torch.nn.Module):
def __init__(self, dim: int):
super().__init__()
self.query_norm = RMSNorm(dim)
self.key_norm = RMSNorm(dim)
def forward(self, q: Tensor, k: Tensor, v: Tensor) -> tuple[Tensor, Tensor]:
q = self.query_norm(q)
k = self.key_norm(k)
return q.to(v), k.to(v)
class SelfAttention(nn.Module):
def __init__(self, dim: int, num_heads: int = 8, qkv_bias: bool = False):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.norm = QKNorm(head_dim)
self.proj = nn.Linear(dim, dim)
def forward(self, x: Tensor, pe: Tensor) -> Tensor:
qkv = self.qkv(x)
q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
q, k = self.norm(q, k, v)
x = attention(q, k, v, pe=pe)
x = self.proj(x)
return x
@dataclass
class ModulationOut:
shift: Tensor
scale: Tensor
gate: Tensor
class Modulation(nn.Module):
def __init__(self, dim: int, double: bool):
super().__init__()
self.is_double = double
self.multiplier = 6 if double else 3
self.lin = nn.Linear(dim, self.multiplier * dim, bias=True)
def forward(self, vec: Tensor) -> tuple[ModulationOut, ModulationOut | None]:
out = self.lin(nn.functional.silu(vec))[:, None, :].chunk(self.multiplier, dim=-1)
return (
ModulationOut(*out[:3]),
ModulationOut(*out[3:]) if self.is_double else None,
)
class DoubleStreamBlock(nn.Module):
def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False):
super().__init__()
mlp_hidden_dim = int(hidden_size * mlp_ratio)
self.num_heads = num_heads
self.hidden_size = hidden_size
self.img_mod = Modulation(hidden_size, double=True)
self.img_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)
self.img_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.img_mlp = nn.Sequential(
nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
nn.GELU(approximate="tanh"),
nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
)
self.txt_mod = Modulation(hidden_size, double=True)
self.txt_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)
self.txt_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.txt_mlp = nn.Sequential(
nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
nn.GELU(approximate="tanh"),
nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
)
def forward(self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor) -> tuple[Tensor, Tensor]:
img_mod1, img_mod2 = self.img_mod(vec)
txt_mod1, txt_mod2 = self.txt_mod(vec)
# prepare image for attention
img_modulated = self.img_norm1(img)
img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift
img_qkv = self.img_attn.qkv(img_modulated)
img_q, img_k, img_v = rearrange(img_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
img_q, img_k = self.img_attn.norm(img_q, img_k, img_v)
# prepare txt for attention
txt_modulated = self.txt_norm1(txt)
txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift
txt_qkv = self.txt_attn.qkv(txt_modulated)
txt_q, txt_k, txt_v = rearrange(txt_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v)
# run actual attention
q = torch.cat((txt_q, img_q), dim=2)
k = torch.cat((txt_k, img_k), dim=2)
v = torch.cat((txt_v, img_v), dim=2)
attn = attention(q, k, v, pe=pe)
txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]
# calculate the img blocks
img = img + img_mod1.gate * self.img_attn.proj(img_attn)
img = img + img_mod2.gate * self.img_mlp((1 + img_mod2.scale) * self.img_norm2(img) + img_mod2.shift)
# calculate the txt blocks
txt = txt + txt_mod1.gate * self.txt_attn.proj(txt_attn)
txt = txt + txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift)
return img, txt
class SingleStreamBlock(nn.Module):
"""
A DiT block with parallel linear layers as described in
https://arxiv.org/abs/2302.05442 and adapted modulation interface.
"""
def __init__(
self,
hidden_size: int,
num_heads: int,
mlp_ratio: float = 4.0,
qk_scale: float | None = None,
):
super().__init__()
self.hidden_dim = hidden_size
self.num_heads = num_heads
head_dim = hidden_size // num_heads
self.scale = qk_scale or head_dim**-0.5
self.mlp_hidden_dim = int(hidden_size * mlp_ratio)
# qkv and mlp_in
self.linear1 = nn.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim)
# proj and mlp_out
self.linear2 = nn.Linear(hidden_size + self.mlp_hidden_dim, hidden_size)
self.norm = QKNorm(head_dim)
self.hidden_size = hidden_size
self.pre_norm = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.mlp_act = nn.GELU(approximate="tanh")
self.modulation = Modulation(hidden_size, double=False)
def forward(self, x: Tensor, vec: Tensor, pe: Tensor) -> Tensor:
mod, _ = self.modulation(vec)
x_mod = (1 + mod.scale) * self.pre_norm(x) + mod.shift
qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1)
q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
q, k = self.norm(q, k, v)
# compute attention
attn = attention(q, k, v, pe=pe)
# compute activation in mlp stream, cat again and run second linear layer
output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
return x + mod.gate * output
class LastLayer(nn.Module):
def __init__(self, hidden_size: int, patch_size: int, out_channels: int):
super().__init__()
self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True))
def forward(self, x: Tensor, vec: Tensor) -> Tensor:
shift, scale = self.adaLN_modulation(vec).chunk(2, dim=1)
x = (1 + scale[:, None, :]) * self.norm_final(x) + shift[:, None, :]
x = self.linear(x)
return x
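As a small shape reference for the embedding modules above (hypothetical sizes): timestep_embedding maps a batch of scalar timesteps to sinusoidal features, which MLPEmbedder then projects to the transformer hidden size.
# Hypothetical shape walk-through for timestep_embedding + MLPEmbedder.
import torch
t = torch.tensor([1.0, 0.5])            # one timestep per batch element
emb = timestep_embedding(t, dim=256)    # (2, 256) sinusoidal features
mlp = MLPEmbedder(in_dim=256, hidden_dim=3072)
vec = mlp(emb)                          # (2, 3072), matching the FLUX hidden size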

View File

@@ -1,135 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux
import math
from typing import Callable
import torch
from einops import rearrange, repeat
def get_noise(
num_samples: int,
height: int,
width: int,
device: torch.device,
dtype: torch.dtype,
seed: int,
):
# We always generate noise on the same device and dtype, then cast, so results are consistent across devices/dtypes.
rand_device = "cpu"
rand_dtype = torch.float16
return torch.randn(
num_samples,
16,
# allow for packing
2 * math.ceil(height / 16),
2 * math.ceil(width / 16),
device=rand_device,
dtype=rand_dtype,
generator=torch.Generator(device=rand_device).manual_seed(seed),
).to(device=device, dtype=dtype)
def time_shift(mu: float, sigma: float, t: torch.Tensor) -> torch.Tensor:
return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)
def get_lin_function(x1: float = 256, y1: float = 0.5, x2: float = 4096, y2: float = 1.15) -> Callable[[float], float]:
m = (y2 - y1) / (x2 - x1)
b = y1 - m * x1
return lambda x: m * x + b
def get_schedule(
num_steps: int,
image_seq_len: int,
base_shift: float = 0.5,
max_shift: float = 1.15,
shift: bool = True,
) -> list[float]:
# extra step for zero
timesteps = torch.linspace(1, 0, num_steps + 1)
# shifting the schedule to favor high timesteps for higher signal images
if shift:
# estimate mu based on linear estimation between two points
mu = get_lin_function(y1=base_shift, y2=max_shift)(image_seq_len)
timesteps = time_shift(mu, 1.0, timesteps)
return timesteps.tolist()
def _find_last_index_ge_val(timesteps: list[float], val: float, eps: float = 1e-6) -> int:
"""Find the last index in timesteps that is >= val.
We use epsilon-close equality to avoid potential floating point errors.
"""
idx = len(list(filter(lambda t: t >= (val - eps), timesteps))) - 1
assert idx >= 0
return idx
def clip_timestep_schedule(timesteps: list[float], denoising_start: float, denoising_end: float) -> list[float]:
"""Clip the timestep schedule to the denoising range.
Args:
timesteps (list[float]): The original timestep schedule: [1.0, ..., 0.0].
denoising_start (float): A value in [0, 1] specifying the start of the denoising process. E.g. a value of 0.2
would mean that the denoising process starts at the last timestep in the schedule >= 0.8.
denoising_end (float): A value in [0, 1] specifying the end of the denoising process. E.g. a value of 0.8 would
mean that the denoising process ends at the last timestep in the schedule >= 0.2.
Returns:
list[float]: The clipped timestep schedule.
"""
assert 0.0 <= denoising_start <= 1.0
assert 0.0 <= denoising_end <= 1.0
assert denoising_start <= denoising_end
t_start_val = 1.0 - denoising_start
t_end_val = 1.0 - denoising_end
t_start_idx = _find_last_index_ge_val(timesteps, t_start_val)
t_end_idx = _find_last_index_ge_val(timesteps, t_end_val)
clipped_timesteps = timesteps[t_start_idx : t_end_idx + 1]
return clipped_timesteps
def unpack(x: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""Unpack flat array of patch embeddings to latent image."""
return rearrange(
x,
"b (h w) (c ph pw) -> b c (h ph) (w pw)",
h=math.ceil(height / 16),
w=math.ceil(width / 16),
ph=2,
pw=2,
)
def pack(x: torch.Tensor) -> torch.Tensor:
"""Pack latent image to flattented array of patch embeddings."""
# Pixel unshuffle with a scale of 2, and flatten the height/width dimensions to get an array of patches.
return rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
def generate_img_ids(h: int, w: int, batch_size: int, device: torch.device, dtype: torch.dtype) -> torch.Tensor:
"""Generate tensor of image position ids.
Args:
h (int): Height of image in latent space.
w (int): Width of image in latent space.
batch_size (int): Batch size.
device (torch.device): Device.
dtype (torch.dtype): dtype.
Returns:
torch.Tensor: Image position ids.
"""
img_ids = torch.zeros(h // 2, w // 2, 3, device=device, dtype=dtype)
img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2, device=device, dtype=dtype)[:, None]
img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2, device=device, dtype=dtype)[None, :]
img_ids = repeat(img_ids, "h w c -> b (h w) c", b=batch_size)
return img_ids
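A worked example of the schedule helpers above, using small illustrative values (shift disabled so the numbers stay round):
# Illustrative use of get_schedule() and clip_timestep_schedule().
timesteps = get_schedule(num_steps=4, image_seq_len=1024, shift=False)
# -> [1.0, 0.75, 0.5, 0.25, 0.0]
# denoising_start=0.3 keeps from the last timestep >= 0.7; denoising_end=0.8 stops at the last timestep >= 0.2.
clipped = clip_timestep_schedule(timesteps, denoising_start=0.3, denoising_end=0.8)
# -> [0.75, 0.5, 0.25]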

View File

@@ -1,134 +0,0 @@
import torch
from invokeai.backend.util.build_line import build_line
class TrajectoryGuidanceExtension:
"""An implementation of trajectory guidance for FLUX.
What is trajectory guidance?
----------------------------
With SD 1 and SDXL, the amount of change in image-to-image denoising is largely controlled by the denoising_start
parameter. Doing the same thing with the FLUX model does not work as well, because the FLUX model converges very
quickly (roughly time 1.0 to 0.9) to the structure of the final image. The result of this model characteristic is
that you typically get one of two outcomes:
1) a result that is very similar to the original image
2) a result that is very different from the original image, as though it was generated from the text prompt with
pure noise.
To address this issue in image-to-image workflows with FLUX, we employ the concept of trajectory guidance. The
idea is that in addition to controlling the denoising_start parameter (i.e. the amount of noise added to the
original image), we can also guide the denoising process to stay close to the trajectory that would reproduce the
original. By controlling the strength of the trajectory guidance throughout the denoising process, we can achieve
FLUX image-to-image behavior with the same level of control offered by SD1 and SDXL.
What is the trajectory_guidance_strength?
-----------------------------------------
In the limit, we could apply a different trajectory guidance 'strength' for every latent value in every timestep.
This would be impractical for a user, so instead we have engineered a strength schedule that is more convenient to
use. The `trajectory_guidance_strength` parameter is a single scalar value that maps to a schedule. The engineered
schedule is defined as:
1) An initial change_ratio at t=1.0.
2) A linear ramp up to change_ratio=1.0 at t = t_cutoff.
3) A constant change_ratio=1.0 after t = t_cutoff.
"""
def __init__(
self, init_latents: torch.Tensor, inpaint_mask: torch.Tensor | None, trajectory_guidance_strength: float
):
"""Initialize TrajectoryGuidanceExtension.
Args:
init_latents (torch.Tensor): The initial latents (i.e. un-noised at timestep 0). In 'packed' format.
inpaint_mask (torch.Tensor | None): A mask specifying which elements to inpaint. Range [0, 1]. Values of 1
will be re-generated. Values of 0 will remain unchanged. Values between 0 and 1 can be used to blend the
inpainted region with the background. In 'packed' format. If None, will be treated as a mask of all 1s.
trajectory_guidance_strength (float): A value in [0, 1] specifying the strength of the trajectory guidance.
A value of 0.0 is equivalent to vanilla image-to-image. A value of 1.0 will guide the denoising process
very close to the original latents.
"""
assert 0.0 <= trajectory_guidance_strength <= 1.0
self._init_latents = init_latents
if inpaint_mask is None:
# The inpaint mask is None, so we initialize a mask with a single value of 1.0.
# This value will be broadcasted and treated as a mask of all 1s.
self._inpaint_mask = torch.ones(1, device=init_latents.device, dtype=init_latents.dtype)
else:
self._inpaint_mask = inpaint_mask
# Calculate the params that define the trajectory guidance schedule.
# These mappings from trajectory_guidance_strength have no theoretical basis - they were tuned manually.
self._trajectory_guidance_strength = trajectory_guidance_strength
self._change_ratio_at_t_1 = build_line(x1=0.0, y1=1.0, x2=1.0, y2=0.0)(self._trajectory_guidance_strength)
self._change_ratio_at_cutoff = 1.0
self._t_cutoff = build_line(x1=0.0, y1=1.0, x2=1.0, y2=0.5)(self._trajectory_guidance_strength)
def _apply_mask_gradient_adjustment(self, t_prev: float) -> torch.Tensor:
"""Applies inpaint mask gradient adjustment and returns the inpaint mask to be used at the current timestep."""
# As we progress through the denoising process, we promote gradient regions of the mask to have a full weight of
# 1.0. This helps to produce more coherent seams around the inpainted region. We experimented with a (small)
# number of promotion strategies (e.g. gradual promotion based on timestep), but found that a simple cutoff
# threshold worked well.
# We use a small epsilon to avoid any potential issues with floating point precision.
eps = 1e-4
mask_gradient_t_cutoff = 0.5
if t_prev > mask_gradient_t_cutoff:
# Early in the denoising process, use the inpaint mask as-is.
return self._inpaint_mask
else:
# After the cut-off, promote all non-zero mask values to 1.0.
mask = self._inpaint_mask.where(self._inpaint_mask <= (0.0 + eps), 1.0)
return mask
def _get_change_ratio(self, t_prev: float) -> float:
"""Get the change_ratio for t_prev based on the change schedule."""
change_ratio = 1.0
if t_prev > self._t_cutoff:
# If we are before the cutoff, linearly interpolate between the change_ratio at t=1.0 and the change_ratio
# at the cutoff.
change_ratio = build_line(
x1=1.0, y1=self._change_ratio_at_t_1, x2=self._t_cutoff, y2=self._change_ratio_at_cutoff
)(t_prev)
# The change_ratio should be in the range [0, 1]. Assert that we didn't make any mistakes.
eps = 1e-5
assert 0.0 - eps <= change_ratio <= 1.0 + eps
return change_ratio
def update_noise(
self, t_curr_latents: torch.Tensor, pred_noise: torch.Tensor, t_curr: float, t_prev: float
) -> torch.Tensor:
# Handle gradient cutoff.
mask = self._apply_mask_gradient_adjustment(t_prev)
mask = mask * self._get_change_ratio(t_prev)
# NOTE(ryand): During inpainting, it is common to guide the denoising process by noising the initial latents for
# the current timestep and then blending the predicted intermediate latents with the noised initial latents.
# For example:
# ```
# noised_init_latents = self._noise * t_prev + (1.0 - t_prev) * self._init_latents
# return t_prev_latents * self._inpaint_mask + noised_init_latents * (1.0 - self._inpaint_mask)
# ```
# Instead of guiding based on the noised initial latents, we have decided to guide based on the noise prediction
# that points towards the initial latents. The difference between these guidance strategies is minor, but
# qualitatively we found the latter to produce slightly better results. When change_ratio is 0.0 or 1.0 there is
# no difference between the two strategies.
#
# We experimented with a number of related guidance strategies, but not exhaustively. It's entirely possible
# that there's a much better way to do this.
# Calculate noise guidance
# What noise should the model have predicted at this timestep to step towards self._init_latents?
# Derivation:
# > t_prev_latents = t_curr_latents + (t_prev - t_curr) * pred_noise
# > t_0_latents = t_curr_latents + (0 - t_curr) * init_traj_noise
# > t_0_latents = t_curr_latents - t_curr * init_traj_noise
# > init_traj_noise = (t_curr_latents - t_0_latents) / t_curr
init_traj_noise = (t_curr_latents - self._init_latents) / t_curr
# Blend the init_traj_noise with the pred_noise according to the inpaint mask and the trajectory guidance.
noise = pred_noise * mask + init_traj_noise * (1.0 - mask)
return noise
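To make the engineered schedule concrete, a small sketch (illustrative strength value, same build_line mapping as the constructor) of how change_ratio evolves as the timestep decreases:
# Illustrative change_ratio schedule for a hypothetical trajectory_guidance_strength.
from invokeai.backend.util.build_line import build_line
strength = 0.5
change_ratio_at_t_1 = build_line(x1=0.0, y1=1.0, x2=1.0, y2=0.0)(strength)   # 0.5
t_cutoff = build_line(x1=0.0, y1=1.0, x2=1.0, y2=0.5)(strength)              # 0.75
for t_prev in [1.0, 0.9, 0.8, 0.75, 0.5, 0.25]:
    if t_prev > t_cutoff:
        # Linear ramp from change_ratio_at_t_1 (at t=1.0) up to 1.0 (at t=t_cutoff).
        change_ratio = build_line(x1=1.0, y1=change_ratio_at_t_1, x2=t_cutoff, y2=1.0)(t_prev)
    else:
        change_ratio = 1.0
    print(f"t_prev={t_prev:.2f} change_ratio={change_ratio:.2f}")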

View File

@@ -1,71 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux
from dataclasses import dataclass
from typing import Dict, Literal
from invokeai.backend.flux.model import FluxParams
from invokeai.backend.flux.modules.autoencoder import AutoEncoderParams
@dataclass
class ModelSpec:
params: FluxParams
ae_params: AutoEncoderParams
ckpt_path: str | None
ae_path: str | None
repo_id: str | None
repo_flow: str | None
repo_ae: str | None
max_seq_lengths: Dict[str, Literal[256, 512]] = {
"flux-dev": 512,
"flux-schnell": 256,
}
ae_params = {
"flux": AutoEncoderParams(
resolution=256,
in_channels=3,
ch=128,
out_ch=3,
ch_mult=[1, 2, 4, 4],
num_res_blocks=2,
z_channels=16,
scale_factor=0.3611,
shift_factor=0.1159,
)
}
params = {
"flux-dev": FluxParams(
in_channels=64,
vec_in_dim=768,
context_in_dim=4096,
hidden_size=3072,
mlp_ratio=4.0,
num_heads=24,
depth=19,
depth_single_blocks=38,
axes_dim=[16, 56, 56],
theta=10_000,
qkv_bias=True,
guidance_embed=True,
),
"flux-schnell": FluxParams(
in_channels=64,
vec_in_dim=768,
context_in_dim=4096,
hidden_size=3072,
mlp_ratio=4.0,
num_heads=24,
depth=19,
depth_single_blocks=38,
axes_dim=[16, 56, 56],
theta=10_000,
qkv_bias=True,
guidance_embed=False,
),
}

View File

@@ -1,40 +0,0 @@
# Adapted from https://github.com/huggingface/controlnet_aux
import cv2
import numpy as np
from PIL import Image
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
def make_noise_disk(H, W, C, F):
noise = np.random.uniform(low=0, high=1, size=((H // F) + 2, (W // F) + 2, C))
noise = cv2.resize(noise, (W + 2 * F, H + 2 * F), interpolation=cv2.INTER_CUBIC)
noise = noise[F : F + H, F : F + W]
noise -= np.min(noise)
noise /= np.max(noise)
if C == 1:
noise = noise[:, :, None]
return noise
def content_shuffle(input_image: Image.Image, scale_factor: int | None = None) -> Image.Image:
"""Shuffles the content of an image using a disk noise pattern, similar to a 'liquify' effect."""
np_img = pil_to_np(input_image)
height, width, _channels = np_img.shape
if scale_factor is None:
scale_factor = 256
x = make_noise_disk(height, width, 1, scale_factor) * float(width - 1)
y = make_noise_disk(height, width, 1, scale_factor) * float(height - 1)
flow = np.concatenate([x, y], axis=2).astype(np.float32)
shuffled_img = cv2.remap(np_img, flow, None, cv2.INTER_LINEAR)
output_img = np_to_pil(shuffled_img)
return output_img
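A minimal usage sketch for the processor above (hypothetical file paths):
# Hypothetical use of content_shuffle as a ControlNet preprocessor.
from PIL import Image
input_image = Image.open("example.png").convert("RGB")    # hypothetical input path
shuffled = content_shuffle(input_image, scale_factor=256)
shuffled.save("example_shuffle.png")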

View File

@@ -1,9 +1,7 @@
import pathlib
from typing import Optional
import torch
from PIL import Image
from transformers import pipeline
from transformers.pipelines import DepthEstimationPipeline
from invokeai.backend.raw_model import RawModel
@@ -31,11 +29,3 @@ class DepthAnythingPipeline(RawModel):
from invokeai.backend.model_manager.load.model_util import calc_module_size
return calc_module_size(self._pipeline.model)
@classmethod
def load_model(cls, model_path: pathlib.Path):
"""Load the model from the given path and return a DepthAnythingPipeline instance."""
depth_anything_pipeline = pipeline(model=str(model_path), task="depth-estimation", local_files_only=True)
assert isinstance(depth_anything_pipeline, DepthEstimationPipeline)
return cls(depth_anything_pipeline)

View File

@@ -1,19 +1,13 @@
from pathlib import Path
from typing import Dict
import huggingface_hub
import numpy as np
import onnxruntime as ort
import torch
from controlnet_aux.util import resize_image
from PIL import Image
from invokeai.backend.image_util.dw_openpose.onnxdet import inference_detector
from invokeai.backend.image_util.dw_openpose.onnxpose import inference_pose
from invokeai.backend.image_util.dw_openpose.utils import NDArrayInt, draw_bodypose, draw_facepose, draw_handpose
from invokeai.backend.image_util.dw_openpose.wholebody import Wholebody
from invokeai.backend.image_util.util import np_to_pil
from invokeai.backend.util.devices import TorchDevice
DWPOSE_MODELS = {
"yolox_l.onnx": "https://huggingface.co/yzd-v/DWPose/resolve/main/yolox_l.onnx?download=true",
@@ -115,142 +109,4 @@ class DWOpenposeDetector:
)
class DWOpenposeDetector2:
"""
Code from the original implementation of the DW Openpose Detector.
Credits: https://github.com/IDEA-Research/DWPose
This implementation is similar to DWOpenposeDetector, with some alterations to allow the onnx models to be loaded
and managed by the model manager.
"""
hf_repo_id = "yzd-v/DWPose"
hf_filename_onnx_det = "yolox_l.onnx"
hf_filename_onnx_pose = "dw-ll_ucoco_384.onnx"
@classmethod
def get_model_url_det(cls) -> str:
"""Returns the URL for the detection model."""
return huggingface_hub.hf_hub_url(cls.hf_repo_id, cls.hf_filename_onnx_det)
@classmethod
def get_model_url_pose(cls) -> str:
"""Returns the URL for the pose model."""
return huggingface_hub.hf_hub_url(cls.hf_repo_id, cls.hf_filename_onnx_pose)
@staticmethod
def create_onnx_inference_session(model_path: Path) -> ort.InferenceSession:
"""Creates an ONNX Inference Session for the given model path, using the appropriate execution provider based on
the device type."""
device = TorchDevice.choose_torch_device()
providers = ["CUDAExecutionProvider"] if device.type == "cuda" else ["CPUExecutionProvider"]
return ort.InferenceSession(path_or_bytes=model_path, providers=providers)
def __init__(self, session_det: ort.InferenceSession, session_pose: ort.InferenceSession):
self.session_det = session_det
self.session_pose = session_pose
def pose_estimation(self, np_image: np.ndarray):
"""Does the pose estimation on the given image and returns the keypoints and scores."""
det_result = inference_detector(self.session_det, np_image)
keypoints, scores = inference_pose(self.session_pose, det_result, np_image)
keypoints_info = np.concatenate((keypoints, scores[..., None]), axis=-1)
# compute neck joint
neck = np.mean(keypoints_info[:, [5, 6]], axis=1)
# neck score when visualizing pred
neck[:, 2:4] = np.logical_and(keypoints_info[:, 5, 2:4] > 0.3, keypoints_info[:, 6, 2:4] > 0.3).astype(int)
new_keypoints_info = np.insert(keypoints_info, 17, neck, axis=1)
mmpose_idx = [17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3]
openpose_idx = [1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17]
new_keypoints_info[:, openpose_idx] = new_keypoints_info[:, mmpose_idx]
keypoints_info = new_keypoints_info
keypoints, scores = keypoints_info[..., :2], keypoints_info[..., 2]
return keypoints, scores
def run(
self,
image: Image.Image,
draw_face: bool = False,
draw_body: bool = True,
draw_hands: bool = False,
) -> Image.Image:
"""Detects the pose in the given image and returns an solid black image with pose drawn on top, suitable for
use with a ControlNet."""
np_image = np.array(image)
H, W, C = np_image.shape
with torch.no_grad():
candidate, subset = self.pose_estimation(np_image)
nums, keys, locs = candidate.shape
candidate[..., 0] /= float(W)
candidate[..., 1] /= float(H)
body = candidate[:, :18].copy()
body = body.reshape(nums * 18, locs)
score = subset[:, :18]
for i in range(len(score)):
for j in range(len(score[i])):
if score[i][j] > 0.3:
score[i][j] = int(18 * i + j)
else:
score[i][j] = -1
un_visible = subset < 0.3
candidate[un_visible] = -1
# foot = candidate[:, 18:24]
faces = candidate[:, 24:92]
hands = candidate[:, 92:113]
hands = np.vstack([hands, candidate[:, 113:]])
bodies = {"candidate": body, "subset": score}
pose = {"bodies": bodies, "hands": hands, "faces": faces}
return DWOpenposeDetector2.draw_pose(
pose, H, W, draw_face=draw_face, draw_hands=draw_hands, draw_body=draw_body
)
@staticmethod
def draw_pose(
pose: Dict[str, NDArrayInt | Dict[str, NDArrayInt]],
H: int,
W: int,
draw_face: bool = True,
draw_body: bool = True,
draw_hands: bool = True,
) -> Image.Image:
"""Draws the pose on a black image and returns it as a PIL Image."""
bodies = pose["bodies"]
faces = pose["faces"]
hands = pose["hands"]
assert isinstance(bodies, dict)
candidate = bodies["candidate"]
assert isinstance(bodies, dict)
subset = bodies["subset"]
canvas = np.zeros(shape=(H, W, 3), dtype=np.uint8)
if draw_body:
canvas = draw_bodypose(canvas, candidate, subset)
if draw_hands:
assert isinstance(hands, np.ndarray)
canvas = draw_handpose(canvas, hands)
if draw_face:
assert isinstance(faces, np.ndarray)
canvas = draw_facepose(canvas, faces) # type: ignore
dwpose_image = np_to_pil(canvas)
return dwpose_image
__all__ = ["DWPOSE_MODELS", "DWOpenposeDetector"]

View File

@@ -1,9 +1,6 @@
# Adapted from https://github.com/huggingface/controlnet_aux
import pathlib
"""Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license)."""
import cv2
import huggingface_hub
import numpy as np
import torch
from einops import rearrange
@@ -143,74 +140,3 @@ class HEDProcessor:
detected_map[detected_map < 255] = 0
return np_to_pil(detected_map)
class HEDEdgeDetector:
"""Simple wrapper around the HED model for detecting edges in an image."""
hf_repo_id = "lllyasviel/Annotators"
hf_filename = "ControlNetHED.pth"
def __init__(self, model: ControlNetHED_Apache2):
self.model = model
@classmethod
def get_model_url(cls) -> str:
"""Get the URL to download the model from the Hugging Face Hub."""
return huggingface_hub.hf_hub_url(cls.hf_repo_id, cls.hf_filename)
@classmethod
def load_model(cls, model_path: pathlib.Path) -> ControlNetHED_Apache2:
"""Load the model from a file."""
model = ControlNetHED_Apache2()
model.load_state_dict(torch.load(model_path, map_location="cpu"))
model.float().eval()
return model
def to(self, device: torch.device):
self.model.to(device)
return self
def run(self, image: Image.Image, safe: bool = False, scribble: bool = False) -> Image.Image:
"""Processes an image and returns the detected edges.
Args:
image: The input image.
safe: Whether to apply safe step to the detected edges.
scribble: Whether to apply non-maximum suppression and Gaussian blur to the detected edges.
Returns:
The detected edges.
"""
device = next(iter(self.model.parameters())).device
np_image = pil_to_np(image)
height, width, _channels = np_image.shape
with torch.no_grad():
image_hed = torch.from_numpy(np_image.copy()).float().to(device)
image_hed = rearrange(image_hed, "h w c -> 1 c h w")
edges = self.model(image_hed)
edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges]
edges = [cv2.resize(e, (width, height), interpolation=cv2.INTER_LINEAR) for e in edges]
edges = np.stack(edges, axis=2)
edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64)))
if safe:
edge = safe_step(edge)
edge = (edge * 255.0).clip(0, 255).astype(np.uint8)
detected_map = edge
detected_map = cv2.resize(detected_map, (width, height), interpolation=cv2.INTER_LINEAR)
if scribble:
detected_map = nms(detected_map, 127, 3.0)
detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0)
detected_map[detected_map > 4] = 255
detected_map[detected_map < 255] = 0
output = np_to_pil(detected_map)
return output

View File

@@ -1,9 +1,6 @@
"""Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license)."""
import pathlib
import cv2
import huggingface_hub
import numpy as np
import torch
import torch.nn as nn
@@ -159,69 +156,3 @@ class LineartProcessor:
detected_map = 255 - detected_map
return np_to_pil(detected_map)
class LineartEdgeDetector:
"""Simple wrapper around the fine and coarse lineart models for detecting edges in an image."""
hf_repo_id = "lllyasviel/Annotators"
hf_filename_fine = "sk_model.pth"
hf_filename_coarse = "sk_model2.pth"
@classmethod
def get_model_url(cls, coarse: bool = False) -> str:
"""Get the URL to download the model from the Hugging Face Hub."""
if coarse:
return huggingface_hub.hf_hub_url(cls.hf_repo_id, cls.hf_filename_coarse)
else:
return huggingface_hub.hf_hub_url(cls.hf_repo_id, cls.hf_filename_fine)
@classmethod
def load_model(cls, model_path: pathlib.Path) -> Generator:
"""Load the model from a file."""
model = Generator(3, 1, 3)
model.load_state_dict(torch.load(model_path, map_location="cpu"))
model.float().eval()
return model
def __init__(self, model: Generator) -> None:
self.model = model
def to(self, device: torch.device):
self.model.to(device)
return self
def run(self, image: Image.Image) -> Image.Image:
"""Detects edges in the input image with the selected lineart model.
Args:
input: The input image.
coarse: Whether to use the coarse model.
Returns:
The detected edges.
"""
device = next(iter(self.model.parameters())).device
np_image = pil_to_np(image)
with torch.no_grad():
np_image = torch.from_numpy(np_image).float().to(device)
np_image = np_image / 255.0
np_image = rearrange(np_image, "h w c -> 1 c h w")
line = self.model(np_image)[0][0]
line = line.cpu().numpy()
line = (line * 255.0).clip(0, 255).astype(np.uint8)
detected_map = 255 - line
# The lineart model often outputs a lot of almost-black noise. SD1.5 ControlNets seem to be OK with this, but
# SDXL ControlNets are not - they need a cleaner map. 12 was experimentally determined to be a good threshold,
# eliminating all the noise while keeping the actual edges. Other approaches to thresholding may be better,
# for example stretching the contrast or removing noise.
detected_map[detected_map < 12] = 0
output = np_to_pil(detected_map)
return output

View File

@@ -1,11 +1,9 @@
"""Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license)."""
import functools
import pathlib
from typing import Optional
import cv2
import huggingface_hub
import numpy as np
import torch
import torch.nn as nn
@@ -203,71 +201,3 @@ class LineartAnimeProcessor:
detected_map = 255 - detected_map
return np_to_pil(detected_map)
class LineartAnimeEdgeDetector:
"""Simple wrapper around the Lineart Anime model for detecting edges in an image."""
hf_repo_id = "lllyasviel/Annotators"
hf_filename = "netG.pth"
@classmethod
def get_model_url(cls) -> str:
"""Get the URL to download the model from the Hugging Face Hub."""
return huggingface_hub.hf_hub_url(cls.hf_repo_id, cls.hf_filename)
@classmethod
def load_model(cls, model_path: pathlib.Path) -> UnetGenerator:
"""Load the model from a file."""
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
model = UnetGenerator(3, 1, 8, 64, norm_layer=norm_layer, use_dropout=False)
ckpt = torch.load(model_path)
for key in list(ckpt.keys()):
if "module." in key:
ckpt[key.replace("module.", "")] = ckpt[key]
del ckpt[key]
model.load_state_dict(ckpt)
model.eval()
return model
def __init__(self, model: UnetGenerator) -> None:
self.model = model
def to(self, device: torch.device):
self.model.to(device)
return self
def run(self, image: Image.Image) -> Image.Image:
"""Processes an image and returns the detected edges."""
device = next(iter(self.model.parameters())).device
np_image = pil_to_np(image)
height, width, _channels = np_image.shape
new_height = 256 * int(np.ceil(float(height) / 256.0))
new_width = 256 * int(np.ceil(float(width) / 256.0))
resized_img = cv2.resize(np_image, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
with torch.no_grad():
image_feed = torch.from_numpy(resized_img).float().to(device)
image_feed = image_feed / 127.5 - 1.0
image_feed = rearrange(image_feed, "h w c -> 1 c h w")
line = self.model(image_feed)[0, 0] * 127.5 + 127.5
line = line.cpu().numpy()
line = cv2.resize(line, (width, height), interpolation=cv2.INTER_CUBIC)
line = line.clip(0, 255).astype(np.uint8)
detected_map = 255 - line
# The lineart model often outputs a lot of almost-black noise. SD1.5 ControlNets seem to be OK with this, but
# SDXL ControlNets are not - they need a cleaner map. 12 was experimentally determined to be a good threshold,
# eliminating all the noise while keeping the actual edges. Other approaches to thresholding may be better,
# for example stretching the contrast or removing noise.
detected_map[detected_map < 12] = 0
output = np_to_pil(detected_map)
return output

View File

@@ -1,15 +0,0 @@
# Adapted from https://github.com/huggingface/controlnet_aux
from PIL import Image
from invokeai.backend.image_util.mediapipe_face.mediapipe_face_common import generate_annotation
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
def detect_faces(image: Image.Image, max_faces: int = 1, min_confidence: float = 0.5) -> Image.Image:
"""Detects faces in an image using MediaPipe."""
np_img = pil_to_np(image)
detected_map = generate_annotation(np_img, max_faces, min_confidence)
detected_map_pil = np_to_pil(detected_map)
return detected_map_pil

View File

@@ -1,149 +0,0 @@
from typing import Mapping
import mediapipe as mp
import numpy
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_face_detection = mp.solutions.face_detection # Only for counting faces.
mp_face_mesh = mp.solutions.face_mesh
mp_face_connections = mp.solutions.face_mesh_connections.FACEMESH_TESSELATION
mp_hand_connections = mp.solutions.hands_connections.HAND_CONNECTIONS
mp_body_connections = mp.solutions.pose_connections.POSE_CONNECTIONS
DrawingSpec = mp.solutions.drawing_styles.DrawingSpec
PoseLandmark = mp.solutions.drawing_styles.PoseLandmark
min_face_size_pixels: int = 64
f_thick = 2
f_rad = 1
right_iris_draw = DrawingSpec(color=(10, 200, 250), thickness=f_thick, circle_radius=f_rad)
right_eye_draw = DrawingSpec(color=(10, 200, 180), thickness=f_thick, circle_radius=f_rad)
right_eyebrow_draw = DrawingSpec(color=(10, 220, 180), thickness=f_thick, circle_radius=f_rad)
left_iris_draw = DrawingSpec(color=(250, 200, 10), thickness=f_thick, circle_radius=f_rad)
left_eye_draw = DrawingSpec(color=(180, 200, 10), thickness=f_thick, circle_radius=f_rad)
left_eyebrow_draw = DrawingSpec(color=(180, 220, 10), thickness=f_thick, circle_radius=f_rad)
mouth_draw = DrawingSpec(color=(10, 180, 10), thickness=f_thick, circle_radius=f_rad)
head_draw = DrawingSpec(color=(10, 200, 10), thickness=f_thick, circle_radius=f_rad)
# mp_face_mesh.FACEMESH_CONTOURS has all the items we care about.
face_connection_spec = {}
for edge in mp_face_mesh.FACEMESH_FACE_OVAL:
face_connection_spec[edge] = head_draw
for edge in mp_face_mesh.FACEMESH_LEFT_EYE:
face_connection_spec[edge] = left_eye_draw
for edge in mp_face_mesh.FACEMESH_LEFT_EYEBROW:
face_connection_spec[edge] = left_eyebrow_draw
# for edge in mp_face_mesh.FACEMESH_LEFT_IRIS:
# face_connection_spec[edge] = left_iris_draw
for edge in mp_face_mesh.FACEMESH_RIGHT_EYE:
face_connection_spec[edge] = right_eye_draw
for edge in mp_face_mesh.FACEMESH_RIGHT_EYEBROW:
face_connection_spec[edge] = right_eyebrow_draw
# for edge in mp_face_mesh.FACEMESH_RIGHT_IRIS:
# face_connection_spec[edge] = right_iris_draw
for edge in mp_face_mesh.FACEMESH_LIPS:
face_connection_spec[edge] = mouth_draw
iris_landmark_spec = {468: right_iris_draw, 473: left_iris_draw}
def draw_pupils(image, landmark_list, drawing_spec, halfwidth: int = 2):
"""We have a custom function to draw the pupils because the mp.draw_landmarks method requires a parameter for all
landmarks. Until our PR is merged into mediapipe, we need this separate method."""
if len(image.shape) != 3:
raise ValueError("Input image must be H,W,C.")
image_rows, image_cols, image_channels = image.shape
if image_channels != 3: # BGR channels
raise ValueError("Input image must contain three channel bgr data.")
for idx, landmark in enumerate(landmark_list.landmark):
if (landmark.HasField("visibility") and landmark.visibility < 0.9) or (
landmark.HasField("presence") and landmark.presence < 0.5
):
continue
if landmark.x >= 1.0 or landmark.x < 0 or landmark.y >= 1.0 or landmark.y < 0:
continue
image_x = int(image_cols * landmark.x)
image_y = int(image_rows * landmark.y)
draw_color = None
if isinstance(drawing_spec, Mapping):
if drawing_spec.get(idx) is None:
continue
else:
draw_color = drawing_spec[idx].color
elif isinstance(drawing_spec, DrawingSpec):
draw_color = drawing_spec.color
image[image_y - halfwidth : image_y + halfwidth, image_x - halfwidth : image_x + halfwidth, :] = draw_color
def reverse_channels(image):
"""Given a numpy array in RGB form, convert to BGR. Will also convert from BGR to RGB."""
# im[:,:,::-1] is a neat hack to convert BGR to RGB by reversing the indexing order.
# im[:,:,::[2,1,0]] would also work but makes a copy of the data.
return image[:, :, ::-1]
def generate_annotation(img_rgb, max_faces: int, min_confidence: float):
"""
Find up to 'max_faces' inside the provided input image.
If min_face_size_pixels is provided and nonzero it will be used to filter faces that occupy less than this many
pixels in the image.
"""
with mp_face_mesh.FaceMesh(
static_image_mode=True,
max_num_faces=max_faces,
refine_landmarks=True,
min_detection_confidence=min_confidence,
) as facemesh:
img_height, img_width, img_channels = img_rgb.shape
assert img_channels == 3
results = facemesh.process(img_rgb).multi_face_landmarks
if results is None:
print("No faces detected in controlnet image for Mediapipe face annotator.")
return numpy.zeros_like(img_rgb)
# Filter faces that are too small
filtered_landmarks = []
for lm in results:
landmarks = lm.landmark
face_rect = [
landmarks[0].x,
landmarks[0].y,
landmarks[0].x,
landmarks[0].y,
] # Left, up, right, down.
for i in range(len(landmarks)):
face_rect[0] = min(face_rect[0], landmarks[i].x)
face_rect[1] = min(face_rect[1], landmarks[i].y)
face_rect[2] = max(face_rect[2], landmarks[i].x)
face_rect[3] = max(face_rect[3], landmarks[i].y)
if min_face_size_pixels > 0:
face_width = abs(face_rect[2] - face_rect[0])
face_height = abs(face_rect[3] - face_rect[1])
face_width_pixels = face_width * img_width
face_height_pixels = face_height * img_height
face_size = min(face_width_pixels, face_height_pixels)
if face_size >= min_face_size_pixels:
filtered_landmarks.append(lm)
else:
filtered_landmarks.append(lm)
# Annotations are drawn in BGR for some reason, but we don't need to flip a zero-filled image at the start.
empty = numpy.zeros_like(img_rgb)
# Draw detected faces:
for face_landmarks in filtered_landmarks:
mp_drawing.draw_landmarks(
empty,
face_landmarks,
connections=face_connection_spec.keys(),
landmark_drawing_spec=None,
connection_drawing_spec=face_connection_spec,
)
draw_pupils(empty, face_landmarks, iris_landmark_spec, 2)
# Flip BGR back to RGB.
empty = reverse_channels(empty).copy()
return empty
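A minimal usage sketch for the annotator above (editor's addition, not part of the original file; the input file name and the max_faces / min_confidence values are illustrative):
```python
import numpy
from PIL import Image

img_rgb = numpy.asarray(Image.open("face.png").convert("RGB"))  # placeholder path
annotation = generate_annotation(img_rgb, max_faces=1, min_confidence=0.5)
Image.fromarray(annotation).save("face_annotation.png")
```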


@@ -1,66 +0,0 @@
# Adapted from https://github.com/huggingface/controlnet_aux
import pathlib
import cv2
import huggingface_hub
import numpy as np
import torch
from PIL import Image
from invokeai.backend.image_util.mlsd.models.mbv2_mlsd_large import MobileV2_MLSD_Large
from invokeai.backend.image_util.mlsd.utils import pred_lines
from invokeai.backend.image_util.util import np_to_pil, pil_to_np, resize_to_multiple
class MLSDDetector:
"""Simple wrapper around a MLSD model for detecting edges as line segments in an image."""
hf_repo_id = "lllyasviel/ControlNet"
hf_filename = "annotator/ckpts/mlsd_large_512_fp32.pth"
@classmethod
def get_model_url(cls) -> str:
"""Get the URL to download the model from the Hugging Face Hub."""
return huggingface_hub.hf_hub_url(cls.hf_repo_id, cls.hf_filename)
@classmethod
def load_model(cls, model_path: pathlib.Path) -> MobileV2_MLSD_Large:
"""Load the model from a file."""
model = MobileV2_MLSD_Large()
model.load_state_dict(torch.load(model_path), strict=True)
model.eval()
return model
def __init__(self, model: MobileV2_MLSD_Large) -> None:
self.model = model
def to(self, device: torch.device):
self.model.to(device)
return self
def run(self, image: Image.Image, score_threshold: float = 0.1, distance_threshold: float = 20.0) -> Image.Image:
"""Processes an image and returns the detected edges."""
np_img = pil_to_np(image)
height, width, _channels = np_img.shape
# This model requires the input image to have a resolution that is a multiple of 64
np_img = resize_to_multiple(np_img, 64)
img_output = np.zeros_like(np_img)
with torch.no_grad():
lines = pred_lines(np_img, self.model, [np_img.shape[0], np_img.shape[1]], score_threshold, distance_threshold)
for line in lines:
x_start, y_start, x_end, y_end = [int(val) for val in line]
cv2.line(img_output, (x_start, y_start), (x_end, y_end), [255, 255, 255], 1)
detected_map = img_output[:, :, 0]
# Back to the original size
output_image = cv2.resize(detected_map, (width, height), interpolation=cv2.INTER_LINEAR)
return np_to_pil(output_image)
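A hedged usage sketch for the detector above (editor's addition): the checkpoint is fetched with huggingface_hub.hf_hub_download, and the input file name is a placeholder.
```python
import pathlib

import huggingface_hub
import torch
from PIL import Image

ckpt_path = huggingface_hub.hf_hub_download(MLSDDetector.hf_repo_id, MLSDDetector.hf_filename)
detector = MLSDDetector(MLSDDetector.load_model(pathlib.Path(ckpt_path))).to(torch.device("cpu"))
edge_map = detector.run(Image.open("room.png").convert("RGB"), score_threshold=0.1, distance_threshold=20.0)
edge_map.save("room_mlsd.png")
```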


@@ -1,290 +0,0 @@
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.nn import functional as F
class BlockTypeA(nn.Module):
def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True):
super(BlockTypeA, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_c2, out_c2, kernel_size=1),
nn.BatchNorm2d(out_c2),
nn.ReLU(inplace=True)
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_c1, out_c1, kernel_size=1),
nn.BatchNorm2d(out_c1),
nn.ReLU(inplace=True)
)
self.upscale = upscale
def forward(self, a, b):
b = self.conv1(b)
a = self.conv2(a)
if self.upscale:
b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True)
return torch.cat((a, b), dim=1)
class BlockTypeB(nn.Module):
def __init__(self, in_c, out_c):
super(BlockTypeB, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
nn.BatchNorm2d(in_c),
nn.ReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),
nn.BatchNorm2d(out_c),
nn.ReLU()
)
def forward(self, x):
x = self.conv1(x) + x
x = self.conv2(x)
return x
class BlockTypeC(nn.Module):
def __init__(self, in_c, out_c):
super(BlockTypeC, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5),
nn.BatchNorm2d(in_c),
nn.ReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
nn.BatchNorm2d(in_c),
nn.ReLU()
)
self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
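# (Editor's note, not in the original source) Worked examples of the rounding rule:
#   _make_divisible(37, 8) -> 40   (nearest multiple of 8)
#   _make_divisible(30, 8) -> 32
#   _make_divisible(10, 8) -> 16   (8 would be more than 10% below 10, so it rounds up instead)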
class ConvBNReLU(nn.Sequential):
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
self.channel_pad = out_planes - in_planes
self.stride = stride
#padding = (kernel_size - 1) // 2
# TFLite uses slightly different padding than PyTorch
if stride == 2:
padding = 0
else:
padding = (kernel_size - 1) // 2
super(ConvBNReLU, self).__init__(
nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
nn.BatchNorm2d(out_planes),
nn.ReLU6(inplace=True)
)
self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)
def forward(self, x):
# TFLite uses different padding
if self.stride == 2:
x = F.pad(x, (0, 1, 0, 1), "constant", 0)
#print(x.shape)
for module in self:
if not isinstance(module, nn.MaxPool2d):
x = module(x)
return x
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
layers.extend([
# dw
ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
def __init__(self, pretrained=True):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
block: Module specifying inverted residual building block for mobilenet
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
width_mult = 1.0
round_nearest = 8
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
#[6, 160, 3, 2],
#[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [ConvBNReLU(4, input_channel, stride=2)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
self.features = nn.Sequential(*features)
self.fpn_selected = [1, 3, 6, 10, 13]
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
if pretrained:
self._load_pretrained_model()
def _forward_impl(self, x):
# This exists since TorchScript doesn't support inheritance, so the superclass method
# (this one) needs to have a name other than `forward` that can be accessed in a subclass
fpn_features = []
for i, f in enumerate(self.features):
if i > self.fpn_selected[-1]:
break
x = f(x)
if i in self.fpn_selected:
fpn_features.append(x)
c1, c2, c3, c4, c5 = fpn_features
return c1, c2, c3, c4, c5
def forward(self, x):
return self._forward_impl(x)
def _load_pretrained_model(self):
pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
if k in state_dict:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
class MobileV2_MLSD_Large(nn.Module):
def __init__(self):
super(MobileV2_MLSD_Large, self).__init__()
self.backbone = MobileNetV2(pretrained=False)
## A, B
self.block15 = BlockTypeA(in_c1= 64, in_c2= 96,
out_c1= 64, out_c2=64,
upscale=False)
self.block16 = BlockTypeB(128, 64)
## A, B
self.block17 = BlockTypeA(in_c1 = 32, in_c2 = 64,
out_c1= 64, out_c2= 64)
self.block18 = BlockTypeB(128, 64)
## A, B
self.block19 = BlockTypeA(in_c1=24, in_c2=64,
out_c1=64, out_c2=64)
self.block20 = BlockTypeB(128, 64)
## A, B, C
self.block21 = BlockTypeA(in_c1=16, in_c2=64,
out_c1=64, out_c2=64)
self.block22 = BlockTypeB(128, 64)
self.block23 = BlockTypeC(64, 16)
def forward(self, x):
c1, c2, c3, c4, c5 = self.backbone(x)
x = self.block15(c4, c5)
x = self.block16(x)
x = self.block17(c3, x)
x = self.block18(x)
x = self.block19(c2, x)
x = self.block20(x)
x = self.block21(c1, x)
x = self.block22(x)
x = self.block23(x)
x = x[:, 7:, :, :]
return x
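A hypothetical smoke test for the model above (editor's addition): the backbone's first conv expects 4 input channels (RGB plus the constant channel that pred_lines appends), and for a 512x512 input the head should emit a 9-channel map at half resolution.
```python
import torch

model = MobileV2_MLSD_Large().eval()
with torch.no_grad():
    out = model(torch.zeros(1, 4, 512, 512))
print(out.shape)  # expected: torch.Size([1, 9, 256, 256])
```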


@@ -1,273 +0,0 @@
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.nn import functional as F
class BlockTypeA(nn.Module):
def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True):
super(BlockTypeA, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_c2, out_c2, kernel_size=1),
nn.BatchNorm2d(out_c2),
nn.ReLU(inplace=True)
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_c1, out_c1, kernel_size=1),
nn.BatchNorm2d(out_c1),
nn.ReLU(inplace=True)
)
self.upscale = upscale
def forward(self, a, b):
b = self.conv1(b)
a = self.conv2(a)
b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True)
return torch.cat((a, b), dim=1)
class BlockTypeB(nn.Module):
def __init__(self, in_c, out_c):
super(BlockTypeB, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
nn.BatchNorm2d(in_c),
nn.ReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),
nn.BatchNorm2d(out_c),
nn.ReLU()
)
def forward(self, x):
x = self.conv1(x) + x
x = self.conv2(x)
return x
class BlockTypeC(nn.Module):
def __init__(self, in_c, out_c):
super(BlockTypeC, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5),
nn.BatchNorm2d(in_c),
nn.ReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
nn.BatchNorm2d(in_c),
nn.ReLU()
)
self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class ConvBNReLU(nn.Sequential):
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
self.channel_pad = out_planes - in_planes
self.stride = stride
#padding = (kernel_size - 1) // 2
# TFLite uses slightly different padding than PyTorch
if stride == 2:
padding = 0
else:
padding = (kernel_size - 1) // 2
super(ConvBNReLU, self).__init__(
nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
nn.BatchNorm2d(out_planes),
nn.ReLU6(inplace=True)
)
self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)
def forward(self, x):
# TFLite uses different padding
if self.stride == 2:
x = F.pad(x, (0, 1, 0, 1), "constant", 0)
#print(x.shape)
for module in self:
if not isinstance(module, nn.MaxPool2d):
x = module(x)
return x
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
layers.extend([
# dw
ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
def __init__(self, pretrained=True):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
block: Module specifying inverted residual building block for mobilenet
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
width_mult = 1.0
round_nearest = 8
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
#[6, 96, 3, 1],
#[6, 160, 3, 2],
#[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [ConvBNReLU(4, input_channel, stride=2)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
self.features = nn.Sequential(*features)
self.fpn_selected = [3, 6, 10]
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
#if pretrained:
# self._load_pretrained_model()
def _forward_impl(self, x):
# This exists since TorchScript doesn't support inheritance, so the superclass method
# (this one) needs to have a name other than `forward` that can be accessed in a subclass
fpn_features = []
for i, f in enumerate(self.features):
if i > self.fpn_selected[-1]:
break
x = f(x)
if i in self.fpn_selected:
fpn_features.append(x)
c2, c3, c4 = fpn_features
return c2, c3, c4
def forward(self, x):
return self._forward_impl(x)
def _load_pretrained_model(self):
pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
if k in state_dict:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
class MobileV2_MLSD_Tiny(nn.Module):
def __init__(self):
super(MobileV2_MLSD_Tiny, self).__init__()
self.backbone = MobileNetV2(pretrained=True)
self.block12 = BlockTypeA(in_c1= 32, in_c2= 64,
out_c1= 64, out_c2=64)
self.block13 = BlockTypeB(128, 64)
self.block14 = BlockTypeA(in_c1 = 24, in_c2 = 64,
out_c1= 32, out_c2= 32)
self.block15 = BlockTypeB(64, 64)
self.block16 = BlockTypeC(64, 16)
def forward(self, x):
c2, c3, c4 = self.backbone(x)
x = self.block12(c3, c4)
x = self.block13(x)
x = self.block14(c2, x)
x = self.block15(x)
x = self.block16(x)
x = x[:, 7:, :, :]
#print(x.shape)
x = F.interpolate(x, scale_factor=2.0, mode='bilinear', align_corners=True)
return x


@@ -1,587 +0,0 @@
'''
modified by lihaoweicv
pytorch version
'''
'''
M-LSD
Copyright 2021-present NAVER Corp.
Apache License v2.0
'''
import cv2
import numpy as np
import torch
from torch.nn import functional as F
def deccode_output_score_and_ptss(tpMap, topk_n = 200, ksize = 5):
'''
tpMap:
center: tpMap[1, 0, :, :]
displacement: tpMap[1, 1:5, :, :]
'''
b, c, h, w = tpMap.shape
assert b == 1, 'only supports batch size 1'
displacement = tpMap[:, 1:5, :, :][0]
center = tpMap[:, 0, :, :]
heat = torch.sigmoid(center)
hmax = F.max_pool2d( heat, (ksize, ksize), stride=1, padding=(ksize-1)//2)
keep = (hmax == heat).float()
heat = heat * keep
heat = heat.reshape(-1, )
scores, indices = torch.topk(heat, topk_n, dim=-1, largest=True)
yy = torch.floor_divide(indices, w).unsqueeze(-1)
xx = torch.fmod(indices, w).unsqueeze(-1)
ptss = torch.cat((yy, xx),dim=-1)
ptss = ptss.detach().cpu().numpy()
scores = scores.detach().cpu().numpy()
displacement = displacement.detach().cpu().numpy()
displacement = displacement.transpose((1,2,0))
return ptss, scores, displacement
def pred_lines(image, model,
input_shape=[512, 512],
score_thr=0.10,
dist_thr=20.0):
h, w, _ = image.shape
device = next(iter(model.parameters())).device
h_ratio, w_ratio = [h / input_shape[0], w / input_shape[1]]
resized_image = np.concatenate([cv2.resize(image, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA),
np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
resized_image = resized_image.transpose((2,0,1))
batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
batch_image = (batch_image / 127.5) - 1.0
batch_image = torch.from_numpy(batch_image).float()
batch_image = batch_image.to(device)
outputs = model(batch_image)
pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
start = vmap[:, :, :2]
end = vmap[:, :, 2:]
dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))
segments_list = []
for center, score in zip(pts, pts_score, strict=False):
y, x = center
distance = dist_map[y, x]
if score > score_thr and distance > dist_thr:
disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
x_start = x + disp_x_start
y_start = y + disp_y_start
x_end = x + disp_x_end
y_end = y + disp_y_end
segments_list.append([x_start, y_start, x_end, y_end])
if segments_list:
lines = 2 * np.array(segments_list)  # scale coords from the 256 output grid back to the 512 input
lines[:, 0] = lines[:, 0] * w_ratio
lines[:, 1] = lines[:, 1] * h_ratio
lines[:, 2] = lines[:, 2] * w_ratio
lines[:, 3] = lines[:, 3] * h_ratio
else:
# No segments detected - return empty array
lines = np.array([])
return lines
def pred_squares(image,
model,
input_shape=[512, 512],
params={'score': 0.06,
'outside_ratio': 0.28,
'inside_ratio': 0.45,
'w_overlap': 0.0,
'w_degree': 1.95,
'w_length': 0.0,
'w_area': 1.86,
'w_center': 0.14}):
'''
shape = [height, width]
'''
h, w, _ = image.shape
original_shape = [h, w]
device = next(iter(model.parameters())).device
resized_image = np.concatenate([cv2.resize(image, (input_shape[0], input_shape[1]), interpolation=cv2.INTER_AREA),
np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
resized_image = resized_image.transpose((2, 0, 1))
batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
batch_image = (batch_image / 127.5) - 1.0
batch_image = torch.from_numpy(batch_image).float().to(device)
outputs = model(batch_image)
pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
start = vmap[:, :, :2] # (x, y)
end = vmap[:, :, 2:] # (x, y)
dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))
junc_list = []
segments_list = []
for junc, score in zip(pts, pts_score, strict=False):
y, x = junc
distance = dist_map[y, x]
if score > params['score'] and distance > 20.0:
junc_list.append([x, y])
disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
d_arrow = 1.0
x_start = x + d_arrow * disp_x_start
y_start = y + d_arrow * disp_y_start
x_end = x + d_arrow * disp_x_end
y_end = y + d_arrow * disp_y_end
segments_list.append([x_start, y_start, x_end, y_end])
segments = np.array(segments_list)
####### post processing for squares
# 1. get unique lines
point = np.array([[0, 0]])
point = point[0]
start = segments[:, :2]
end = segments[:, 2:]
diff = start - end
a = diff[:, 1]
b = -diff[:, 0]
c = a * start[:, 0] + b * start[:, 1]
d = np.abs(a * point[0] + b * point[1] - c) / np.sqrt(a ** 2 + b ** 2 + 1e-10)
theta = np.arctan2(diff[:, 0], diff[:, 1]) * 180 / np.pi
theta[theta < 0.0] += 180
hough = np.concatenate([d[:, None], theta[:, None]], axis=-1)
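# (Editor's note) Each segment above is mapped to its infinite line a*x + b*y = c with
# a = dy and b = -dx, so d = |a*x0 + b*y0 - c| / sqrt(a^2 + b^2) is the perpendicular
# distance of that line from the origin (0, 0) and theta is its orientation in degrees.
# Quantizing (d, theta) below lets near-identical segments fall into the same
# Hough-style accumulator cell so duplicates can be merged.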
d_quant = 1
theta_quant = 2
hough[:, 0] //= d_quant
hough[:, 1] //= theta_quant
_, indices, counts = np.unique(hough, axis=0, return_index=True, return_counts=True)
acc_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='float32')
idx_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='int32') - 1
yx_indices = hough[indices, :].astype('int32')
acc_map[yx_indices[:, 0], yx_indices[:, 1]] = counts
idx_map[yx_indices[:, 0], yx_indices[:, 1]] = indices
acc_map_np = acc_map
# acc_map = acc_map[None, :, :, None]
#
# ### fast suppression using tensorflow op
# acc_map = tf.constant(acc_map, dtype=tf.float32)
# max_acc_map = tf.keras.layers.MaxPool2D(pool_size=(5, 5), strides=1, padding='same')(acc_map)
# acc_map = acc_map * tf.cast(tf.math.equal(acc_map, max_acc_map), tf.float32)
# flatten_acc_map = tf.reshape(acc_map, [1, -1])
# topk_values, topk_indices = tf.math.top_k(flatten_acc_map, k=len(pts))
# _, h, w, _ = acc_map.shape
# y = tf.expand_dims(topk_indices // w, axis=-1)
# x = tf.expand_dims(topk_indices % w, axis=-1)
# yx = tf.concat([y, x], axis=-1)
### fast suppression using pytorch op
acc_map = torch.from_numpy(acc_map_np).unsqueeze(0).unsqueeze(0)
_,_, h, w = acc_map.shape
max_acc_map = F.max_pool2d(acc_map,kernel_size=5, stride=1, padding=2)
acc_map = acc_map * ( (acc_map == max_acc_map).float() )
flatten_acc_map = acc_map.reshape([-1, ])
scores, indices = torch.topk(flatten_acc_map, len(pts), dim=-1, largest=True)
yy = torch.div(indices, w, rounding_mode='floor').unsqueeze(-1)
xx = torch.fmod(indices, w).unsqueeze(-1)
yx = torch.cat((yy, xx), dim=-1)
yx = yx.detach().cpu().numpy()
topk_values = scores.detach().cpu().numpy()
indices = idx_map[yx[:, 0], yx[:, 1]]
basis = 5 // 2
merged_segments = []
for yx_pt, max_indice, value in zip(yx, indices, topk_values, strict=False):
y, x = yx_pt
if max_indice == -1 or value == 0:
continue
segment_list = []
for y_offset in range(-basis, basis + 1):
for x_offset in range(-basis, basis + 1):
indice = idx_map[y + y_offset, x + x_offset]
cnt = int(acc_map_np[y + y_offset, x + x_offset])
if indice != -1:
segment_list.append(segments[indice])
if cnt > 1:
check_cnt = 1
current_hough = hough[indice]
for new_indice, new_hough in enumerate(hough):
if (current_hough == new_hough).all() and indice != new_indice:
segment_list.append(segments[new_indice])
check_cnt += 1
if check_cnt == cnt:
break
group_segments = np.array(segment_list).reshape([-1, 2])
sorted_group_segments = np.sort(group_segments, axis=0)
x_min, y_min = sorted_group_segments[0, :]
x_max, y_max = sorted_group_segments[-1, :]
deg = theta[max_indice]
if deg >= 90:
merged_segments.append([x_min, y_max, x_max, y_min])
else:
merged_segments.append([x_min, y_min, x_max, y_max])
# 2. get intersections
new_segments = np.array(merged_segments) # (x1, y1, x2, y2)
start = new_segments[:, :2] # (x1, y1)
end = new_segments[:, 2:] # (x2, y2)
new_centers = (start + end) / 2.0
diff = start - end
dist_segments = np.sqrt(np.sum(diff ** 2, axis=-1))
# ax + by = c
a = diff[:, 1]
b = -diff[:, 0]
c = a * start[:, 0] + b * start[:, 1]
pre_det = a[:, None] * b[None, :]
det = pre_det - np.transpose(pre_det)
pre_inter_y = a[:, None] * c[None, :]
inter_y = (pre_inter_y - np.transpose(pre_inter_y)) / (det + 1e-10)
pre_inter_x = c[:, None] * b[None, :]
inter_x = (pre_inter_x - np.transpose(pre_inter_x)) / (det + 1e-10)
inter_pts = np.concatenate([inter_x[:, :, None], inter_y[:, :, None]], axis=-1).astype('int32')
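# (Editor's note) The block above is Cramer's rule applied pairwise to the lines
# a_i*x + b_i*y = c_i and a_j*x + b_j*y = c_j:
#   x = (c_i*b_j - c_j*b_i) / (a_i*b_j - a_j*b_i),  y = (a_i*c_j - a_j*c_i) / (a_i*b_j - a_j*b_i)
# The 1e-10 term keeps near-parallel line pairs from dividing by zero.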
# 3. get corner information
# 3.1 get distance
'''
dist_segments:
| dist(0), dist(1), dist(2), ...|
dist_inter_to_segment1:
| dist(inter,0), dist(inter,0), dist(inter,0), ... |
| dist(inter,1), dist(inter,1), dist(inter,1), ... |
...
dist_inter_to_segment2:
| dist(inter,0), dist(inter,1), dist(inter,2), ... |
| dist(inter,0), dist(inter,1), dist(inter,2), ... |
...
'''
dist_inter_to_segment1_start = np.sqrt(
np.sum(((inter_pts - start[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
dist_inter_to_segment1_end = np.sqrt(
np.sum(((inter_pts - end[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
dist_inter_to_segment2_start = np.sqrt(
np.sum(((inter_pts - start[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
dist_inter_to_segment2_end = np.sqrt(
np.sum(((inter_pts - end[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
# sort ascending
dist_inter_to_segment1 = np.sort(
np.concatenate([dist_inter_to_segment1_start, dist_inter_to_segment1_end], axis=-1),
axis=-1) # [n_batch, n_batch, 2]
dist_inter_to_segment2 = np.sort(
np.concatenate([dist_inter_to_segment2_start, dist_inter_to_segment2_end], axis=-1),
axis=-1) # [n_batch, n_batch, 2]
# 3.2 get degree
inter_to_start = new_centers[:, None, :] - inter_pts
deg_inter_to_start = np.arctan2(inter_to_start[:, :, 1], inter_to_start[:, :, 0]) * 180 / np.pi
deg_inter_to_start[deg_inter_to_start < 0.0] += 360
inter_to_end = new_centers[None, :, :] - inter_pts
deg_inter_to_end = np.arctan2(inter_to_end[:, :, 1], inter_to_end[:, :, 0]) * 180 / np.pi
deg_inter_to_end[deg_inter_to_end < 0.0] += 360
'''
B -- G
| |
C -- R
B : blue / G: green / C: cyan / R: red
0 -- 1
| |
3 -- 2
'''
# rename variables
deg1_map, deg2_map = deg_inter_to_start, deg_inter_to_end
# sort deg ascending
deg_sort = np.sort(np.concatenate([deg1_map[:, :, None], deg2_map[:, :, None]], axis=-1), axis=-1)
deg_diff_map = np.abs(deg1_map - deg2_map)
# we only consider the smallest degree of intersect
deg_diff_map[deg_diff_map > 180] = 360 - deg_diff_map[deg_diff_map > 180]
# define available degree range
deg_range = [60, 120]
corner_dict = {corner_info: [] for corner_info in range(4)}
inter_points = []
for i in range(inter_pts.shape[0]):
for j in range(i + 1, inter_pts.shape[1]):
# i, j > line index, always i < j
x, y = inter_pts[i, j, :]
deg1, deg2 = deg_sort[i, j, :]
deg_diff = deg_diff_map[i, j]
check_degree = deg_diff > deg_range[0] and deg_diff < deg_range[1]
outside_ratio = params['outside_ratio'] # over ratio >>> drop it!
inside_ratio = params['inside_ratio'] # over ratio >>> drop it!
check_distance = ((dist_inter_to_segment1[i, j, 1] >= dist_segments[i] and \
dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * outside_ratio) or \
(dist_inter_to_segment1[i, j, 1] <= dist_segments[i] and \
dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * inside_ratio)) and \
((dist_inter_to_segment2[i, j, 1] >= dist_segments[j] and \
dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * outside_ratio) or \
(dist_inter_to_segment2[i, j, 1] <= dist_segments[j] and \
dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * inside_ratio))
if check_degree and check_distance:
corner_info = None
if (deg1 >= 0 and deg1 <= 45 and deg2 >= 45 and deg2 <= 120) or \
(deg2 >= 315 and deg1 >= 45 and deg1 <= 120):
corner_info, color_info = 0, 'blue'
elif (deg1 >= 45 and deg1 <= 125 and deg2 >= 125 and deg2 <= 225):
corner_info, color_info = 1, 'green'
elif (deg1 >= 125 and deg1 <= 225 and deg2 >= 225 and deg2 <= 315):
corner_info, color_info = 2, 'black'
elif (deg1 >= 0 and deg1 <= 45 and deg2 >= 225 and deg2 <= 315) or \
(deg2 >= 315 and deg1 >= 225 and deg1 <= 315):
corner_info, color_info = 3, 'cyan'
else:
corner_info, color_info = 4, 'red' # we don't use it
continue
corner_dict[corner_info].append([x, y, i, j])
inter_points.append([x, y])
square_list = []
connect_list = []
segments_list = []
for corner0 in corner_dict[0]:
for corner1 in corner_dict[1]:
connect01 = False
for corner0_line in corner0[2:]:
if corner0_line in corner1[2:]:
connect01 = True
break
if connect01:
for corner2 in corner_dict[2]:
connect12 = False
for corner1_line in corner1[2:]:
if corner1_line in corner2[2:]:
connect12 = True
break
if connect12:
for corner3 in corner_dict[3]:
connect23 = False
for corner2_line in corner2[2:]:
if corner2_line in corner3[2:]:
connect23 = True
break
if connect23:
for corner3_line in corner3[2:]:
if corner3_line in corner0[2:]:
# SQUARE!!!
'''
0 -- 1
| |
3 -- 2
square_list:
order: 0 > 1 > 2 > 3
| x0, y0, x1, y1, x2, y2, x3, y3 |
| x0, y0, x1, y1, x2, y2, x3, y3 |
...
connect_list:
order: 01 > 12 > 23 > 30
| line_idx01, line_idx12, line_idx23, line_idx30 |
| line_idx01, line_idx12, line_idx23, line_idx30 |
...
segments_list:
order: 0 > 1 > 2 > 3
| line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
| line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
...
'''
square_list.append(corner0[:2] + corner1[:2] + corner2[:2] + corner3[:2])
connect_list.append([corner0_line, corner1_line, corner2_line, corner3_line])
segments_list.append(corner0[2:] + corner1[2:] + corner2[2:] + corner3[2:])
def check_outside_inside(segments_info, connect_idx):
# return 'outside or inside', min distance, cover_param, peri_param
if connect_idx == segments_info[0]:
check_dist_mat = dist_inter_to_segment1
else:
check_dist_mat = dist_inter_to_segment2
i, j = segments_info
min_dist, max_dist = check_dist_mat[i, j, :]
connect_dist = dist_segments[connect_idx]
if max_dist > connect_dist:
return 'outside', min_dist, 0, 1
else:
return 'inside', min_dist, -1, -1
top_square = None
try:
map_size = input_shape[0] / 2
squares = np.array(square_list).reshape([-1, 4, 2])
score_array = []
connect_array = np.array(connect_list)
segments_array = np.array(segments_list).reshape([-1, 4, 2])
# get degree of corners:
squares_rollup = np.roll(squares, 1, axis=1)
squares_rolldown = np.roll(squares, -1, axis=1)
vec1 = squares_rollup - squares
normalized_vec1 = vec1 / (np.linalg.norm(vec1, axis=-1, keepdims=True) + 1e-10)
vec2 = squares_rolldown - squares
normalized_vec2 = vec2 / (np.linalg.norm(vec2, axis=-1, keepdims=True) + 1e-10)
inner_products = np.sum(normalized_vec1 * normalized_vec2, axis=-1) # [n_squares, 4]
squares_degree = np.arccos(inner_products) * 180 / np.pi # [n_squares, 4]
# get square score
overlap_scores = []
degree_scores = []
length_scores = []
for connects, segments, square, degree in zip(connect_array, segments_array, squares, squares_degree, strict=False):
'''
0 -- 1
| |
3 -- 2
# segments: [4, 2]
# connects: [4]
'''
###################################### OVERLAP SCORES
cover = 0
perimeter = 0
# check 0 > 1 > 2 > 3
square_length = []
for start_idx in range(4):
end_idx = (start_idx + 1) % 4
connect_idx = connects[start_idx] # segment idx of segment01
start_segments = segments[start_idx]
end_segments = segments[end_idx]
start_point = square[start_idx]
end_point = square[end_idx]
# check whether outside or inside
start_position, start_min, start_cover_param, start_peri_param = check_outside_inside(start_segments,
connect_idx)
end_position, end_min, end_cover_param, end_peri_param = check_outside_inside(end_segments, connect_idx)
cover += dist_segments[connect_idx] + start_cover_param * start_min + end_cover_param * end_min
perimeter += dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min
square_length.append(
dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min)
overlap_scores.append(cover / perimeter)
######################################
###################################### DEGREE SCORES
'''
deg0 vs deg2
deg1 vs deg3
'''
deg0, deg1, deg2, deg3 = degree
deg_ratio1 = deg0 / deg2
if deg_ratio1 > 1.0:
deg_ratio1 = 1 / deg_ratio1
deg_ratio2 = deg1 / deg3
if deg_ratio2 > 1.0:
deg_ratio2 = 1 / deg_ratio2
degree_scores.append((deg_ratio1 + deg_ratio2) / 2)
######################################
###################################### LENGTH SCORES
'''
len0 vs len2
len1 vs len3
'''
len0, len1, len2, len3 = square_length
len_ratio1 = len0 / len2 if len2 > len0 else len2 / len0
len_ratio2 = len1 / len3 if len3 > len1 else len3 / len1
length_scores.append((len_ratio1 + len_ratio2) / 2)
######################################
overlap_scores = np.array(overlap_scores)
overlap_scores /= np.max(overlap_scores)
degree_scores = np.array(degree_scores)
# degree_scores /= np.max(degree_scores)
length_scores = np.array(length_scores)
###################################### AREA SCORES
area_scores = np.reshape(squares, [-1, 4, 2])
area_x = area_scores[:, :, 0]
area_y = area_scores[:, :, 1]
correction = area_x[:, -1] * area_y[:, 0] - area_y[:, -1] * area_x[:, 0]
area_scores = np.sum(area_x[:, :-1] * area_y[:, 1:], axis=-1) - np.sum(area_y[:, :-1] * area_x[:, 1:], axis=-1)
area_scores = 0.5 * np.abs(area_scores + correction)
area_scores /= (map_size * map_size) # np.max(area_scores)
######################################
###################################### CENTER SCORES
centers = np.array([[256 // 2, 256 // 2]], dtype='float32') # [1, 2]
# squares: [n, 4, 2]
square_centers = np.mean(squares, axis=1) # [n, 2]
center2center = np.sqrt(np.sum((centers - square_centers) ** 2))
center_scores = center2center / (map_size / np.sqrt(2.0))
'''
score_w = [overlap, degree, area, center, length]
'''
score_w = [0.0, 1.0, 10.0, 0.5, 1.0]
score_array = params['w_overlap'] * overlap_scores \
+ params['w_degree'] * degree_scores \
+ params['w_area'] * area_scores \
- params['w_center'] * center_scores \
+ params['w_length'] * length_scores
best_square = []
sorted_idx = np.argsort(score_array)[::-1]
score_array = score_array[sorted_idx]
squares = squares[sorted_idx]
except Exception:
pass
'''return list
merged_lines, squares, scores
'''
try:
new_segments[:, 0] = new_segments[:, 0] * 2 / input_shape[1] * original_shape[1]
new_segments[:, 1] = new_segments[:, 1] * 2 / input_shape[0] * original_shape[0]
new_segments[:, 2] = new_segments[:, 2] * 2 / input_shape[1] * original_shape[1]
new_segments[:, 3] = new_segments[:, 3] * 2 / input_shape[0] * original_shape[0]
except Exception:
new_segments = []
try:
squares[:, :, 0] = squares[:, :, 0] * 2 / input_shape[1] * original_shape[1]
squares[:, :, 1] = squares[:, :, 1] * 2 / input_shape[0] * original_shape[0]
except Exception:
squares = []
score_array = []
try:
inter_points = np.array(inter_points)
inter_points[:, 0] = inter_points[:, 0] * 2 / input_shape[1] * original_shape[1]
inter_points[:, 1] = inter_points[:, 1] * 2 / input_shape[0] * original_shape[0]
except Exception:
inter_points = []
return new_segments, squares, score_array, inter_points
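For reference, a sketch of calling pred_lines directly (editor's addition; this mirrors what MLSDDetector.run does, assumes MobileV2_MLSD_Large from the model file above is importable, and uses a zero image purely as a placeholder):
```python
import cv2
import numpy as np
import torch

np_img = np.zeros((512, 512, 3), dtype=np.uint8)   # placeholder RGB image, dims a multiple of 64
model = MobileV2_MLSD_Large().eval()                # untrained weights, for illustration only
with torch.no_grad():
    lines = pred_lines(np_img, model, [512, 512], score_thr=0.1, dist_thr=20.0)
canvas = np.zeros_like(np_img)
for x_start, y_start, x_end, y_end in lines.astype(int).tolist():
    cv2.line(canvas, (x_start, y_start), (x_end, y_end), (255, 255, 255), 1)
```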


@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2022 Caroline Chan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,93 +0,0 @@
# Adapted from https://github.com/huggingface/controlnet_aux
import pathlib
import types
import cv2
import huggingface_hub
import numpy as np
import torch
import torchvision.transforms as transforms
from einops import rearrange
from PIL import Image
from invokeai.backend.image_util.normal_bae.nets.NNET import NNET
from invokeai.backend.image_util.util import np_to_pil, pil_to_np, resize_to_multiple
class NormalMapDetector:
"""Simple wrapper around the Normal BAE model for normal map generation."""
hf_repo_id = "lllyasviel/Annotators"
hf_filename = "scannet.pt"
@classmethod
def get_model_url(cls) -> str:
"""Get the URL to download the model from the Hugging Face Hub."""
return huggingface_hub.hf_hub_url(cls.hf_repo_id, cls.hf_filename)
@classmethod
def load_model(cls, model_path: pathlib.Path) -> NNET:
"""Load the model from a file."""
args = types.SimpleNamespace()
args.mode = "client"
args.architecture = "BN"
args.pretrained = "scannet"
args.sampling_ratio = 0.4
args.importance_ratio = 0.7
model = NNET(args)
ckpt = torch.load(model_path, map_location="cpu")["model"]
load_dict = {}
for k, v in ckpt.items():
if k.startswith("module."):
k_ = k.replace("module.", "")
load_dict[k_] = v
else:
load_dict[k] = v
model.load_state_dict(load_dict)
model.eval()
return model
def __init__(self, model: NNET) -> None:
self.model = model
self.norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def to(self, device: torch.device):
self.model.to(device)
return self
def run(self, image: Image.Image):
"""Processes an image and returns the detected normal map."""
device = next(iter(self.model.parameters())).device
np_image = pil_to_np(image)
height, width, _channels = np_image.shape
# The model requires the image to be a multiple of 8
np_image = resize_to_multiple(np_image, 8)
image_normal = np_image
with torch.no_grad():
image_normal = torch.from_numpy(image_normal).float().to(device)
image_normal = image_normal / 255.0
image_normal = rearrange(image_normal, "h w c -> 1 c h w")
image_normal = self.norm(image_normal)
normal = self.model(image_normal)
normal = normal[0][-1][:, :3]
normal = ((normal + 1) * 0.5).clip(0, 1)
normal = rearrange(normal[0], "c h w -> h w c").cpu().numpy()
normal_image = (normal * 255.0).clip(0, 255).astype(np.uint8)
# Back to the original size
output_image = cv2.resize(normal_image, (width, height), interpolation=cv2.INTER_LINEAR)
return np_to_pil(output_image)
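The same usage pattern as the MLSD detector sketch earlier applies here (editor's addition; the input file name is a placeholder):
```python
import pathlib

import huggingface_hub
import torch
from PIL import Image

ckpt_path = huggingface_hub.hf_hub_download(NormalMapDetector.hf_repo_id, NormalMapDetector.hf_filename)
detector = NormalMapDetector(NormalMapDetector.load_model(pathlib.Path(ckpt_path))).to(torch.device("cpu"))
normal_map = detector.run(Image.open("photo.png").convert("RGB"))
normal_map.save("photo_normals.png")
```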


@@ -1,22 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from .submodules.encoder import Encoder
from .submodules.decoder import Decoder
class NNET(nn.Module):
def __init__(self, args):
super(NNET, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder(args)
def get_1x_lr_params(self): # lr/10 learning rate
return self.encoder.parameters()
def get_10x_lr_params(self): # lr learning rate
return self.decoder.parameters()
def forward(self, img, **kwargs):
return self.decoder(self.encoder(img), **kwargs)


@@ -1,85 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from .submodules.submodules import UpSampleBN, norm_normalize
# This is the baseline encoder-decoder we used in the ablation study
class NNET(nn.Module):
def __init__(self, args=None):
super(NNET, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder(num_classes=4)
def forward(self, x, **kwargs):
out = self.decoder(self.encoder(x), **kwargs)
# Bilinearly upsample the output to match the input resolution
up_out = F.interpolate(out, size=[x.size(2), x.size(3)], mode='bilinear', align_corners=False)
# L2-normalize the first three channels / ensure positive value for concentration parameters (kappa)
up_out = norm_normalize(up_out)
return up_out
def get_1x_lr_params(self): # lr/10 learning rate
return self.encoder.parameters()
def get_10x_lr_params(self): # lr learning rate
modules = [self.decoder]
for m in modules:
yield from m.parameters()
# Encoder
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
basemodel_name = 'tf_efficientnet_b5_ap'
basemodel = torch.hub.load('rwightman/gen-efficientnet-pytorch', basemodel_name, pretrained=True)
# Remove last layer
basemodel.global_pool = nn.Identity()
basemodel.classifier = nn.Identity()
self.original_model = basemodel
def forward(self, x):
features = [x]
for k, v in self.original_model._modules.items():
if (k == 'blocks'):
for ki, vi in v._modules.items():
features.append(vi(features[-1]))
else:
features.append(v(features[-1]))
return features
# Decoder (no pixel-wise MLP, no uncertainty-guided sampling)
class Decoder(nn.Module):
def __init__(self, num_classes=4):
super(Decoder, self).__init__()
self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0)
self.up1 = UpSampleBN(skip_input=2048 + 176, output_features=1024)
self.up2 = UpSampleBN(skip_input=1024 + 64, output_features=512)
self.up3 = UpSampleBN(skip_input=512 + 40, output_features=256)
self.up4 = UpSampleBN(skip_input=256 + 24, output_features=128)
self.conv3 = nn.Conv2d(128, num_classes, kernel_size=3, stride=1, padding=1)
def forward(self, features):
x_block0, x_block1, x_block2, x_block3, x_block4 = features[4], features[5], features[6], features[8], features[11]
x_d0 = self.conv2(x_block4)
x_d1 = self.up1(x_d0, x_block3)
x_d2 = self.up2(x_d1, x_block2)
x_d3 = self.up3(x_d2, x_block1)
x_d4 = self.up4(x_d3, x_block0)
out = self.conv3(x_d4)
return out
if __name__ == '__main__':
model = NNET()
x = torch.rand(2, 3, 480, 640)
out = model(x)
print(out.shape)


@@ -1,202 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from .submodules import UpSampleBN, UpSampleGN, norm_normalize, sample_points
class Decoder(nn.Module):
def __init__(self, args):
super(Decoder, self).__init__()
# hyper-parameter for sampling
self.sampling_ratio = args.sampling_ratio
self.importance_ratio = args.importance_ratio
# feature-map
self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0)
if args.architecture == 'BN':
self.up1 = UpSampleBN(skip_input=2048 + 176, output_features=1024)
self.up2 = UpSampleBN(skip_input=1024 + 64, output_features=512)
self.up3 = UpSampleBN(skip_input=512 + 40, output_features=256)
self.up4 = UpSampleBN(skip_input=256 + 24, output_features=128)
elif args.architecture == 'GN':
self.up1 = UpSampleGN(skip_input=2048 + 176, output_features=1024)
self.up2 = UpSampleGN(skip_input=1024 + 64, output_features=512)
self.up3 = UpSampleGN(skip_input=512 + 40, output_features=256)
self.up4 = UpSampleGN(skip_input=256 + 24, output_features=128)
else:
raise Exception('invalid architecture')
# produces 1/8 res output
self.out_conv_res8 = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)
# produces 1/4 res output
self.out_conv_res4 = nn.Sequential(
nn.Conv1d(512 + 4, 128, kernel_size=1), nn.ReLU(),
nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
nn.Conv1d(128, 4, kernel_size=1),
)
# produces 1/2 res output
self.out_conv_res2 = nn.Sequential(
nn.Conv1d(256 + 4, 128, kernel_size=1), nn.ReLU(),
nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
nn.Conv1d(128, 4, kernel_size=1),
)
# produces 1/1 res output
self.out_conv_res1 = nn.Sequential(
nn.Conv1d(128 + 4, 128, kernel_size=1), nn.ReLU(),
nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
nn.Conv1d(128, 4, kernel_size=1),
)
def forward(self, features, gt_norm_mask=None, mode='test'):
x_block0, x_block1, x_block2, x_block3, x_block4 = features[4], features[5], features[6], features[8], features[11]
# generate feature-map
x_d0 = self.conv2(x_block4) # x_d0 : [2, 2048, 15, 20] 1/32 res
x_d1 = self.up1(x_d0, x_block3) # x_d1 : [2, 1024, 30, 40] 1/16 res
x_d2 = self.up2(x_d1, x_block2) # x_d2 : [2, 512, 60, 80] 1/8 res
x_d3 = self.up3(x_d2, x_block1) # x_d3: [2, 256, 120, 160] 1/4 res
x_d4 = self.up4(x_d3, x_block0) # x_d4: [2, 128, 240, 320] 1/2 res
# 1/8 res output
out_res8 = self.out_conv_res8(x_d2) # out_res8: [2, 4, 60, 80] 1/8 res output
out_res8 = norm_normalize(out_res8) # out_res8: [2, 4, 60, 80] 1/8 res output
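# (Editor's note) The 4 channels are the surface normal (nx, ny, nz) plus a concentration
# parameter kappa; norm_normalize L2-normalizes the first three and keeps kappa positive,
# matching the description in the baseline NNET above.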
################################################################################################################
# out_res4
################################################################################################################
if mode == 'train':
# upsampling ... out_res8: [2, 4, 60, 80] -> out_res8_res4: [2, 4, 120, 160]
out_res8_res4 = F.interpolate(out_res8, scale_factor=2, mode='bilinear', align_corners=True)
B, _, H, W = out_res8_res4.shape
# samples: [B, 1, N, 2]
point_coords_res4, rows_int, cols_int = sample_points(out_res8_res4.detach(), gt_norm_mask,
sampling_ratio=self.sampling_ratio,
beta=self.importance_ratio)
# output (needed for evaluation / visualization)
out_res4 = out_res8_res4
# grid_sample feature-map
feat_res4 = F.grid_sample(x_d2, point_coords_res4, mode='bilinear', align_corners=True) # (B, 512, 1, N)
init_pred = F.grid_sample(out_res8, point_coords_res4, mode='bilinear', align_corners=True) # (B, 4, 1, N)
feat_res4 = torch.cat([feat_res4, init_pred], dim=1) # (B, 512+4, 1, N)
# prediction (needed to compute loss)
samples_pred_res4 = self.out_conv_res4(feat_res4[:, :, 0, :]) # (B, 4, N)
samples_pred_res4 = norm_normalize(samples_pred_res4) # (B, 4, N) - normalized
for i in range(B):
out_res4[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res4[i, :, :]
else:
# grid_sample feature-map
feat_map = F.interpolate(x_d2, scale_factor=2, mode='bilinear', align_corners=True)
init_pred = F.interpolate(out_res8, scale_factor=2, mode='bilinear', align_corners=True)
feat_map = torch.cat([feat_map, init_pred], dim=1) # (B, 512+4, H, W)
B, _, H, W = feat_map.shape
# try all pixels
out_res4 = self.out_conv_res4(feat_map.view(B, 512 + 4, -1)) # (B, 4, N)
out_res4 = norm_normalize(out_res4) # (B, 4, N) - normalized
out_res4 = out_res4.view(B, 4, H, W)
samples_pred_res4 = point_coords_res4 = None
################################################################################################################
# out_res2
################################################################################################################
if mode == 'train':
# upsampling ... out_res4: [2, 4, 120, 160] -> out_res4_res2: [2, 4, 240, 320]
out_res4_res2 = F.interpolate(out_res4, scale_factor=2, mode='bilinear', align_corners=True)
B, _, H, W = out_res4_res2.shape
# samples: [B, 1, N, 2]
point_coords_res2, rows_int, cols_int = sample_points(out_res4_res2.detach(), gt_norm_mask,
sampling_ratio=self.sampling_ratio,
beta=self.importance_ratio)
# output (needed for evaluation / visualization)
out_res2 = out_res4_res2
# grid_sample feature-map
feat_res2 = F.grid_sample(x_d3, point_coords_res2, mode='bilinear', align_corners=True) # (B, 256, 1, N)
init_pred = F.grid_sample(out_res4, point_coords_res2, mode='bilinear', align_corners=True) # (B, 4, 1, N)
feat_res2 = torch.cat([feat_res2, init_pred], dim=1) # (B, 256+4, 1, N)
# prediction (needed to compute loss)
samples_pred_res2 = self.out_conv_res2(feat_res2[:, :, 0, :]) # (B, 4, N)
samples_pred_res2 = norm_normalize(samples_pred_res2) # (B, 4, N) - normalized
for i in range(B):
out_res2[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res2[i, :, :]
else:
# grid_sample feature-map
feat_map = F.interpolate(x_d3, scale_factor=2, mode='bilinear', align_corners=True)
init_pred = F.interpolate(out_res4, scale_factor=2, mode='bilinear', align_corners=True)
feat_map = torch.cat([feat_map, init_pred], dim=1) # (B, 256+4, H, W)
B, _, H, W = feat_map.shape
out_res2 = self.out_conv_res2(feat_map.view(B, 256 + 4, -1)) # (B, 4, N)
out_res2 = norm_normalize(out_res2) # (B, 4, N) - normalized
out_res2 = out_res2.view(B, 4, H, W)
samples_pred_res2 = point_coords_res2 = None
################################################################################################################
# out_res1
################################################################################################################
if mode == 'train':
# upsampling ... out_res2: [2, 4, 240, 320] -> out_res2_res1: [2, 4, 480, 640]
out_res2_res1 = F.interpolate(out_res2, scale_factor=2, mode='bilinear', align_corners=True)
B, _, H, W = out_res2_res1.shape
# samples: [B, 1, N, 2]
point_coords_res1, rows_int, cols_int = sample_points(out_res2_res1.detach(), gt_norm_mask,
sampling_ratio=self.sampling_ratio,
beta=self.importance_ratio)
# output (needed for evaluation / visualization)
out_res1 = out_res2_res1
# grid_sample feature-map
feat_res1 = F.grid_sample(x_d4, point_coords_res1, mode='bilinear', align_corners=True) # (B, 128, 1, N)
init_pred = F.grid_sample(out_res2, point_coords_res1, mode='bilinear', align_corners=True) # (B, 4, 1, N)
feat_res1 = torch.cat([feat_res1, init_pred], dim=1) # (B, 128+4, 1, N)
# prediction (needed to compute loss)
samples_pred_res1 = self.out_conv_res1(feat_res1[:, :, 0, :]) # (B, 4, N)
samples_pred_res1 = norm_normalize(samples_pred_res1) # (B, 4, N) - normalized
for i in range(B):
out_res1[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res1[i, :, :]
else:
# grid_sample feature-map
feat_map = F.interpolate(x_d4, scale_factor=2, mode='bilinear', align_corners=True)
init_pred = F.interpolate(out_res2, scale_factor=2, mode='bilinear', align_corners=True)
feat_map = torch.cat([feat_map, init_pred], dim=1) # (B, 128+4, H, W)
B, _, H, W = feat_map.shape
out_res1 = self.out_conv_res1(feat_map.view(B, 128 + 4, -1)) # (B, 4, N)
out_res1 = norm_normalize(out_res1) # (B, 4, N) - normalized
out_res1 = out_res1.view(B, 4, H, W)
samples_pred_res1 = point_coords_res1 = None
return [out_res8, out_res4, out_res2, out_res1], \
[out_res8, samples_pred_res4, samples_pred_res2, samples_pred_res1], \
[None, point_coords_res4, point_coords_res2, point_coords_res1]


@@ -1,109 +0,0 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# pytorch stuff
*.pth
*.onnx
*.pb
trained_models/
.fuse_hidden*


@@ -1,555 +0,0 @@
# Model Performance Benchmarks
All benchmarks run as per:
```
python onnx_export.py --model mobilenetv3_100 ./mobilenetv3_100.onnx
python onnx_optimize.py ./mobilenetv3_100.onnx --output mobilenetv3_100-opt.onnx
python onnx_to_caffe.py ./mobilenetv3_100.onnx --c2-prefix mobilenetv3
python onnx_to_caffe.py ./mobilenetv3_100-opt.onnx --c2-prefix mobilenetv3-opt
python caffe2_benchmark.py --c2-init ./mobilenetv3.init.pb --c2-predict ./mobilenetv3.predict.pb
python caffe2_benchmark.py --c2-init ./mobilenetv3-opt.init.pb --c2-predict ./mobilenetv3-opt.predict.pb
```
## EfficientNet-B0
### Unoptimized
```
Main run finished. Milliseconds per iter: 49.2862. Iters per second: 20.2897
Time per operator type:
29.7378 ms. 60.5145%. Conv
12.1785 ms. 24.7824%. Sigmoid
3.62811 ms. 7.38297%. SpatialBN
2.98444 ms. 6.07314%. Mul
0.326902 ms. 0.665225%. AveragePool
0.197317 ms. 0.401528%. FC
0.0852877 ms. 0.173555%. Add
0.0032607 ms. 0.00663532%. Squeeze
49.1416 ms in Total
FLOP per operator type:
0.76907 GFLOP. 95.2696%. Conv
0.0269508 GFLOP. 3.33857%. SpatialBN
0.00846444 GFLOP. 1.04855%. Mul
0.002561 GFLOP. 0.317248%. FC
0.000210112 GFLOP. 0.0260279%. Add
0.807256 GFLOP in Total
Feature Memory Read per operator type:
58.5253 MB. 43.0891%. Mul
43.2015 MB. 31.807%. Conv
27.2869 MB. 20.0899%. SpatialBN
5.12912 MB. 3.77631%. FC
1.6809 MB. 1.23756%. Add
135.824 MB in Total
Feature Memory Written per operator type:
33.8578 MB. 38.1965%. Mul
26.9881 MB. 30.4465%. Conv
26.9508 MB. 30.4044%. SpatialBN
0.840448 MB. 0.948147%. Add
0.004 MB. 0.00451258%. FC
88.6412 MB in Total
Parameter Memory per operator type:
15.8248 MB. 74.9391%. Conv
5.124 MB. 24.265%. FC
0.168064 MB. 0.795877%. SpatialBN
0 MB. 0%. Add
0 MB. 0%. Mul
21.1168 MB in Total
```
### Optimized
```
Main run finished. Milliseconds per iter: 46.0838. Iters per second: 21.6996
Time per operator type:
29.776 ms. 65.002%. Conv
12.2803 ms. 26.8084%. Sigmoid
3.15073 ms. 6.87815%. Mul
0.328651 ms. 0.717456%. AveragePool
0.186237 ms. 0.406563%. FC
0.0832429 ms. 0.181722%. Add
0.0026184 ms. 0.00571606%. Squeeze
45.8078 ms in Total
FLOP per operator type:
0.76907 GFLOP. 98.5601%. Conv
0.00846444 GFLOP. 1.08476%. Mul
0.002561 GFLOP. 0.328205%. FC
0.000210112 GFLOP. 0.0269269%. Add
0.780305 GFLOP in Total
Feature Memory Read per operator type:
58.5253 MB. 53.8803%. Mul
43.2855 MB. 39.8501%. Conv
5.12912 MB. 4.72204%. FC
1.6809 MB. 1.54749%. Add
108.621 MB in Total
Feature Memory Written per operator type:
33.8578 MB. 54.8834%. Mul
26.9881 MB. 43.7477%. Conv
0.840448 MB. 1.36237%. Add
0.004 MB. 0.00648399%. FC
61.6904 MB in Total
Parameter Memory per operator type:
15.8248 MB. 75.5403%. Conv
5.124 MB. 24.4597%. FC
0 MB. 0%. Add
0 MB. 0%. Mul
20.9488 MB in Total
```
## EfficientNet-B1
### Optimized
```
Main run finished. Milliseconds per iter: 71.8102. Iters per second: 13.9256
Time per operator type:
45.7915 ms. 66.3206%. Conv
17.8718 ms. 25.8841%. Sigmoid
4.44132 ms. 6.43244%. Mul
0.51001 ms. 0.738658%. AveragePool
0.233283 ms. 0.337868%. Add
0.194986 ms. 0.282402%. FC
0.00268255 ms. 0.00388519%. Squeeze
69.0456 ms in Total
FLOP per operator type:
1.37105 GFLOP. 98.7673%. Conv
0.0138759 GFLOP. 0.99959%. Mul
0.002561 GFLOP. 0.184489%. FC
0.000674432 GFLOP. 0.0485847%. Add
1.38816 GFLOP in Total
Feature Memory Read per operator type:
94.624 MB. 54.0789%. Mul
69.8255 MB. 39.9062%. Conv
5.39546 MB. 3.08357%. Add
5.12912 MB. 2.93136%. FC
174.974 MB in Total
Feature Memory Written per operator type:
55.5035 MB. 54.555%. Mul
43.5333 MB. 42.7894%. Conv
2.69773 MB. 2.65163%. Add
0.004 MB. 0.00393165%. FC
101.739 MB in Total
Parameter Memory per operator type:
25.7479 MB. 83.4024%. Conv
5.124 MB. 16.5976%. FC
0 MB. 0%. Add
0 MB. 0%. Mul
30.8719 MB in Total
```
## EfficientNet-B2
### Optimized
```
Main run finished. Milliseconds per iter: 92.28. Iters per second: 10.8366
Time per operator type:
61.4627 ms. 67.5845%. Conv
22.7458 ms. 25.0113%. Sigmoid
5.59931 ms. 6.15701%. Mul
0.642567 ms. 0.706568%. AveragePool
0.272795 ms. 0.299965%. Add
0.216178 ms. 0.237709%. FC
0.00268895 ms. 0.00295677%. Squeeze
90.942 ms in Total
FLOP per operator type:
1.98431 GFLOP. 98.9343%. Conv
0.0177039 GFLOP. 0.882686%. Mul
0.002817 GFLOP. 0.140451%. FC
0.000853984 GFLOP. 0.0425782%. Add
2.00568 GFLOP in Total
Feature Memory Read per operator type:
120.609 MB. 54.9637%. Mul
86.3512 MB. 39.3519%. Conv
6.83187 MB. 3.11341%. Add
5.64163 MB. 2.571%. FC
219.433 MB in Total
Feature Memory Written per operator type:
70.8155 MB. 54.6573%. Mul
55.3273 MB. 42.7031%. Conv
3.41594 MB. 2.63651%. Add
0.004 MB. 0.00308731%. FC
129.563 MB in Total
Parameter Memory per operator type:
30.4721 MB. 84.3913%. Conv
5.636 MB. 15.6087%. FC
0 MB. 0%. Add
0 MB. 0%. Mul
36.1081 MB in Total
```
## MixNet-M
### Optimized
```
Main run finished. Milliseconds per iter: 63.1122. Iters per second: 15.8448
Time per operator type:
48.1139 ms. 75.2052%. Conv
7.1341 ms. 11.1511%. Sigmoid
2.63706 ms. 4.12189%. SpatialBN
1.73186 ms. 2.70701%. Mul
1.38707 ms. 2.16809%. Split
1.29322 ms. 2.02139%. Concat
1.00093 ms. 1.56452%. Relu
0.235309 ms. 0.367803%. Add
0.221579 ms. 0.346343%. FC
0.219315 ms. 0.342803%. AveragePool
0.00250145 ms. 0.00390993%. Squeeze
63.9768 ms in Total
FLOP per operator type:
0.675273 GFLOP. 95.5827%. Conv
0.0221072 GFLOP. 3.12921%. SpatialBN
0.00538445 GFLOP. 0.762152%. Mul
0.003073 GFLOP. 0.434973%. FC
0.000642488 GFLOP. 0.0909421%. Add
0 GFLOP. 0%. Concat
0 GFLOP. 0%. Relu
0.70648 GFLOP in Total
Feature Memory Read per operator type:
46.8424 MB. 30.502%. Conv
36.8626 MB. 24.0036%. Mul
22.3152 MB. 14.5309%. SpatialBN
22.1074 MB. 14.3955%. Concat
14.1496 MB. 9.21372%. Relu
6.15414 MB. 4.00735%. FC
5.1399 MB. 3.34692%. Add
153.571 MB in Total
Feature Memory Written per operator type:
32.7672 MB. 28.4331%. Conv
22.1072 MB. 19.1831%. Concat
22.1072 MB. 19.1831%. SpatialBN
21.5378 MB. 18.689%. Mul
14.1496 MB. 12.2781%. Relu
2.56995 MB. 2.23003%. Add
0.004 MB. 0.00347092%. FC
115.243 MB in Total
Parameter Memory per operator type:
13.7059 MB. 68.674%. Conv
6.148 MB. 30.8049%. FC
0.104 MB. 0.521097%. SpatialBN
0 MB. 0%. Add
0 MB. 0%. Concat
0 MB. 0%. Mul
0 MB. 0%. Relu
19.9579 MB in Total
```
## TF MobileNet-V3 Large 1.0
### Optimized
```
Main run finished. Milliseconds per iter: 22.0495. Iters per second: 45.3525
Time per operator type:
17.437 ms. 80.0087%. Conv
1.27662 ms. 5.8577%. Add
1.12759 ms. 5.17387%. Div
0.701155 ms. 3.21721%. Mul
0.562654 ms. 2.58171%. Relu
0.431144 ms. 1.97828%. Clip
0.156902 ms. 0.719936%. FC
0.0996858 ms. 0.457402%. AveragePool
0.00112455 ms. 0.00515993%. Flatten
21.7939 ms in Total
FLOP per operator type:
0.43062 GFLOP. 98.1484%. Conv
0.002561 GFLOP. 0.583713%. FC
0.00210867 GFLOP. 0.480616%. Mul
0.00193868 GFLOP. 0.441871%. Add
0.00151532 GFLOP. 0.345377%. Div
0 GFLOP. 0%. Relu
0.438743 GFLOP in Total
Feature Memory Read per operator type:
34.7967 MB. 43.9391%. Conv
14.496 MB. 18.3046%. Mul
9.44828 MB. 11.9307%. Add
9.26157 MB. 11.6949%. Relu
6.0614 MB. 7.65395%. Div
5.12912 MB. 6.47673%. FC
79.193 MB in Total
Feature Memory Written per operator type:
17.6247 MB. 35.8656%. Conv
9.26157 MB. 18.847%. Relu
8.43469 MB. 17.1643%. Mul
7.75472 MB. 15.7806%. Add
6.06128 MB. 12.3345%. Div
0.004 MB. 0.00813985%. FC
49.1409 MB in Total
Parameter Memory per operator type:
16.6851 MB. 76.5052%. Conv
5.124 MB. 23.4948%. FC
0 MB. 0%. Add
0 MB. 0%. Div
0 MB. 0%. Mul
0 MB. 0%. Relu
21.8091 MB in Total
```
## MobileNet-V3 (RW)
### Unoptimized
```
Main run finished. Milliseconds per iter: 24.8316. Iters per second: 40.2712
Time per operator type:
15.9266 ms. 69.2624%. Conv
2.36551 ms. 10.2873%. SpatialBN
1.39102 ms. 6.04936%. Add
1.30327 ms. 5.66773%. Div
0.737014 ms. 3.20517%. Mul
0.639697 ms. 2.78195%. Relu
0.375681 ms. 1.63378%. Clip
0.153126 ms. 0.665921%. FC
0.0993787 ms. 0.432184%. AveragePool
0.0032632 ms. 0.0141912%. Squeeze
22.9946 ms in Total
FLOP per operator type:
0.430616 GFLOP. 94.4041%. Conv
0.0175992 GFLOP. 3.85829%. SpatialBN
0.002561 GFLOP. 0.561449%. FC
0.00210961 GFLOP. 0.46249%. Mul
0.00173891 GFLOP. 0.381223%. Add
0.00151626 GFLOP. 0.33241%. Div
0 GFLOP. 0%. Relu
0.456141 GFLOP in Total
Feature Memory Read per operator type:
34.7354 MB. 36.4363%. Conv
17.7944 MB. 18.6658%. SpatialBN
14.5035 MB. 15.2137%. Mul
9.25778 MB. 9.71113%. Relu
7.84641 MB. 8.23064%. Add
6.06516 MB. 6.36216%. Div
5.12912 MB. 5.38029%. FC
95.3317 MB in Total
Feature Memory Written per operator type:
17.6246 MB. 26.7264%. Conv
17.5992 MB. 26.6878%. SpatialBN
9.25778 MB. 14.0387%. Relu
8.43843 MB. 12.7962%. Mul
6.95565 MB. 10.5477%. Add
6.06502 MB. 9.19713%. Div
0.004 MB. 0.00606568%. FC
65.9447 MB in Total
Parameter Memory per operator type:
16.6778 MB. 76.1564%. Conv
5.124 MB. 23.3979%. FC
0.0976 MB. 0.445674%. SpatialBN
0 MB. 0%. Add
0 MB. 0%. Div
0 MB. 0%. Mul
0 MB. 0%. Relu
21.8994 MB in Total
```
### Optimized
```
Main run finished. Milliseconds per iter: 22.0981. Iters per second: 45.2527
Time per operator type:
17.146 ms. 78.8965%. Conv
1.38453 ms. 6.37084%. Add
1.30991 ms. 6.02749%. Div
0.685417 ms. 3.15391%. Mul
0.532589 ms. 2.45068%. Relu
0.418263 ms. 1.92461%. Clip
0.15128 ms. 0.696106%. FC
0.102065 ms. 0.469648%. AveragePool
0.0022143 ms. 0.010189%. Squeeze
21.7323 ms in Total
FLOP per operator type:
0.430616 GFLOP. 98.1927%. Conv
0.002561 GFLOP. 0.583981%. FC
0.00210961 GFLOP. 0.481051%. Mul
0.00173891 GFLOP. 0.396522%. Add
0.00151626 GFLOP. 0.34575%. Div
0 GFLOP. 0%. Relu
0.438542 GFLOP in Total
Feature Memory Read per operator type:
34.7842 MB. 44.833%. Conv
14.5035 MB. 18.6934%. Mul
9.25778 MB. 11.9323%. Relu
7.84641 MB. 10.1132%. Add
6.06516 MB. 7.81733%. Div
5.12912 MB. 6.61087%. FC
77.5861 MB in Total
Feature Memory Written per operator type:
17.6246 MB. 36.4556%. Conv
9.25778 MB. 19.1492%. Relu
8.43843 MB. 17.4544%. Mul
6.95565 MB. 14.3874%. Add
6.06502 MB. 12.5452%. Div
0.004 MB. 0.00827378%. FC
48.3455 MB in Total
Parameter Memory per operator type:
16.6778 MB. 76.4973%. Conv
5.124 MB. 23.5027%. FC
0 MB. 0%. Add
0 MB. 0%. Div
0 MB. 0%. Mul
0 MB. 0%. Relu
21.8018 MB in Total
```
## MnasNet-A1
### Unoptimized
```
Main run finished. Milliseconds per iter: 30.0892. Iters per second: 33.2345
Time per operator type:
24.4656 ms. 79.0905%. Conv
4.14958 ms. 13.4144%. SpatialBN
1.60598 ms. 5.19169%. Relu
0.295219 ms. 0.95436%. Mul
0.187609 ms. 0.606486%. FC
0.120556 ms. 0.389724%. AveragePool
0.09036 ms. 0.292109%. Add
0.015727 ms. 0.050841%. Sigmoid
0.00306205 ms. 0.00989875%. Squeeze
30.9337 ms in Total
FLOP per operator type:
0.620598 GFLOP. 95.6434%. Conv
0.0248873 GFLOP. 3.8355%. SpatialBN
0.002561 GFLOP. 0.394688%. FC
0.000597408 GFLOP. 0.0920695%. Mul
0.000222656 GFLOP. 0.0343146%. Add
0 GFLOP. 0%. Relu
0.648867 GFLOP in Total
Feature Memory Read per operator type:
35.5457 MB. 38.4109%. Conv
25.1552 MB. 27.1829%. SpatialBN
22.5235 MB. 24.339%. Relu
5.12912 MB. 5.54256%. FC
2.40586 MB. 2.59978%. Mul
1.78125 MB. 1.92483%. Add
92.5406 MB in Total
Feature Memory Written per operator type:
24.9042 MB. 32.9424%. Conv
24.8873 MB. 32.92%. SpatialBN
22.5235 MB. 29.7932%. Relu
2.38963 MB. 3.16092%. Mul
0.890624 MB. 1.17809%. Add
0.004 MB. 0.00529106%. FC
75.5993 MB in Total
Parameter Memory per operator type:
10.2732 MB. 66.1459%. Conv
5.124 MB. 32.9917%. FC
0.133952 MB. 0.86247%. SpatialBN
0 MB. 0%. Add
0 MB. 0%. Mul
0 MB. 0%. Relu
15.5312 MB in Total
```
### Optimized
```
Main run finished. Milliseconds per iter: 24.2367. Iters per second: 41.2597
Time per operator type:
22.0547 ms. 91.1375%. Conv
1.49096 ms. 6.16116%. Relu
0.253417 ms. 1.0472%. Mul
0.18506 ms. 0.76473%. FC
0.112942 ms. 0.466717%. AveragePool
0.086769 ms. 0.358559%. Add
0.0127889 ms. 0.0528479%. Sigmoid
0.0027346 ms. 0.0113003%. Squeeze
24.1994 ms in Total
FLOP per operator type:
0.620598 GFLOP. 99.4581%. Conv
0.002561 GFLOP. 0.41043%. FC
0.000597408 GFLOP. 0.0957417%. Mul
0.000222656 GFLOP. 0.0356832%. Add
0 GFLOP. 0%. Relu
0.623979 GFLOP in Total
Feature Memory Read per operator type:
35.6127 MB. 52.7968%. Conv
22.5235 MB. 33.3917%. Relu
5.12912 MB. 7.60406%. FC
2.40586 MB. 3.56675%. Mul
1.78125 MB. 2.64075%. Add
67.4524 MB in Total
Feature Memory Written per operator type:
24.9042 MB. 49.1092%. Conv
22.5235 MB. 44.4145%. Relu
2.38963 MB. 4.71216%. Mul
0.890624 MB. 1.75624%. Add
0.004 MB. 0.00788768%. FC
50.712 MB in Total
Parameter Memory per operator type:
10.2732 MB. 66.7213%. Conv
5.124 MB. 33.2787%. FC
0 MB. 0%. Add
0 MB. 0%. Mul
0 MB. 0%. Relu
15.3972 MB in Total
```
## MnasNet-B1
### Unoptimized
```
Main run finished. Milliseconds per iter: 28.3109. Iters per second: 35.322
Time per operator type:
29.1121 ms. 83.3081%. Conv
4.14959 ms. 11.8746%. SpatialBN
1.35823 ms. 3.88675%. Relu
0.186188 ms. 0.532802%. FC
0.116244 ms. 0.332647%. Add
0.018641 ms. 0.0533437%. AveragePool
0.0040904 ms. 0.0117052%. Squeeze
34.9451 ms in Total
FLOP per operator type:
0.626272 GFLOP. 96.2088%. Conv
0.0218266 GFLOP. 3.35303%. SpatialBN
0.002561 GFLOP. 0.393424%. FC
0.000291648 GFLOP. 0.0448034%. Add
0 GFLOP. 0%. Relu
0.650951 GFLOP in Total
Feature Memory Read per operator type:
34.4354 MB. 41.3788%. Conv
22.1299 MB. 26.5921%. SpatialBN
19.1923 MB. 23.0622%. Relu
5.12912 MB. 6.16333%. FC
2.33318 MB. 2.80364%. Add
83.2199 MB in Total
Feature Memory Written per operator type:
21.8266 MB. 34.0955%. Conv
21.8266 MB. 34.0955%. SpatialBN
19.1923 MB. 29.9805%. Relu
1.16659 MB. 1.82234%. Add
0.004 MB. 0.00624844%. FC
64.016 MB in Total
Parameter Memory per operator type:
12.2576 MB. 69.9104%. Conv
5.124 MB. 29.2245%. FC
0.15168 MB. 0.865099%. SpatialBN
0 MB. 0%. Add
0 MB. 0%. Relu
17.5332 MB in Total
```
### Optimized
```
Main run finished. Milliseconds per iter: 26.6364. Iters per second: 37.5426
Time per operator type:
24.9888 ms. 94.0962%. Conv
1.26147 ms. 4.75011%. Relu
0.176234 ms. 0.663619%. FC
0.113309 ms. 0.426672%. Add
0.0138708 ms. 0.0522311%. AveragePool
0.00295685 ms. 0.0111341%. Squeeze
26.5566 ms in Total
FLOP per operator type:
0.626272 GFLOP. 99.5466%. Conv
0.002561 GFLOP. 0.407074%. FC
0.000291648 GFLOP. 0.0463578%. Add
0 GFLOP. 0%. Relu
0.629124 GFLOP in Total
Feature Memory Read per operator type:
34.5112 MB. 56.4224%. Conv
19.1923 MB. 31.3775%. Relu
5.12912 MB. 8.3856%. FC
2.33318 MB. 3.81452%. Add
61.1658 MB in Total
Feature Memory Written per operator type:
21.8266 MB. 51.7346%. Conv
19.1923 MB. 45.4908%. Relu
1.16659 MB. 2.76513%. Add
0.004 MB. 0.00948104%. FC
42.1895 MB in Total
Parameter Memory per operator type:
12.2576 MB. 70.5205%. Conv
5.124 MB. 29.4795%. FC
0 MB. 0%. Add
0 MB. 0%. Relu
17.3816 MB in Total
```

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2020 Ross Wightman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,323 +0,0 @@
# (Generic) EfficientNets for PyTorch
A 'generic' implementation of EfficientNet, MixNet, MobileNetV3, etc. that covers most of the compute/parameter efficient architectures derived from the MobileNet V1/V2 block sequence, including those found via automated neural architecture search.
All models are implemented by the GenEfficientNet or MobileNetV3 classes, with string-based architecture definitions to configure the block layouts (idea from [here](https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py))
## What's New
### Aug 19, 2020
* Add updated PyTorch trained EfficientNet-B3 weights trained by myself with `timm` (82.1 top-1)
* Add PyTorch trained EfficientNet-Lite0 contributed by [@hal-314](https://github.com/hal-314) (75.5 top-1)
* Update ONNX and Caffe2 export / utility scripts to work with latest PyTorch / ONNX
* ONNX runtime based validation script added
* activations (mostly) brought in sync with `timm` equivalents
### April 5, 2020
* Add some newly trained MobileNet-V2 models trained with latest h-params, rand augment. They compare quite favourably to EfficientNet-Lite
* 3.5M param MobileNet-V2 100 @ 73%
* 4.5M param MobileNet-V2 110d @ 75%
* 6.1M param MobileNet-V2 140 @ 76.5%
* 5.8M param MobileNet-V2 120d @ 77.3%
### March 23, 2020
* Add EfficientNet-Lite models w/ weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
* Add PyTorch trained MobileNet-V3 Large weights with 75.77% top-1
* IMPORTANT CHANGE (if training from scratch) - weight init changed to better match Tensorflow impl, set `fix_group_fanout=False` in `initialize_weight_goog` for old behavior
### Feb 12, 2020
* Add EfficientNet-L2 and B0-B7 NoisyStudent weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet)
* Port new EfficientNet-B8 (RandAugment) weights from TF TPU; these differ from the B8 AdvProp weights and use different input normalization.
* Add RandAugment PyTorch trained EfficientNet-ES (EdgeTPU-Small) weights with 78.1 top-1. Trained by [Andrew Lavin](https://github.com/andravin)
### Jan 22, 2020
* Update weights for EfficientNet B0, B2, B3 and MixNet-XL with latest RandAugment trained weights. Trained with (https://github.com/rwightman/pytorch-image-models)
* Fix torchscript compatibility for PyTorch 1.4, add torchscript support for MixedConv2d using ModuleDict
* Test models, torchscript, onnx export with PyTorch 1.4 -- no issues
### Nov 22, 2019
* New top-1 high! Ported official TF EfficientNet AdvProp (https://arxiv.org/abs/1911.09665) weights and B8 model spec. Created a new set of `ap` models since they use a different
preprocessing (Inception mean/std) from the original EfficientNet base/AA/RA weights.
### Nov 15, 2019
* Ported official TF MobileNet-V3 float32 large/small/minimalistic weights
* Modifications to MobileNet-V3 model and components to support some additional config needed for differences between TF MobileNet-V3 and mine
### Oct 30, 2019
* Many of the models will now work with torch.jit.script, MixNet being the biggest exception
* Improved interface for enabling torchscript or ONNX export compatible modes (via config)
* Add JIT optimized mem-efficient Swish/Mish autograd.fn in addition to memory-efficient autograd.fn
* Activation factory to select best version of activation by name or override one globally
* Add pretrained checkpoint load helper that handles input conv and classifier changes
### Oct 27, 2019
* Add CondConv EfficientNet variants ported from https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv
* Add RandAug weights for TF EfficientNet B5 and B7 from https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
* Bring over MixNet-XL model and depth scaling algo from my pytorch-image-models code base
* Switch activations and global pooling to modules
* Add memory-efficient Swish/Mish impl
* Add as_sequential() method to all models and allow as an argument in entrypoint fns
* Move MobileNetV3 into own file since it has a different head
* Remove ChamNet, MobileNet V2/V1 since they will likely never be used here
## Models
Implemented models include:
* EfficientNet NoisyStudent (B0-B7, L2) (https://arxiv.org/abs/1911.04252)
* EfficientNet AdvProp (B0-B8) (https://arxiv.org/abs/1911.09665)
* EfficientNet (B0-B8) (https://arxiv.org/abs/1905.11946)
* EfficientNet-EdgeTPU (S, M, L) (https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html)
* EfficientNet-CondConv (https://arxiv.org/abs/1904.04971)
* EfficientNet-Lite (https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
* MixNet (https://arxiv.org/abs/1907.09595)
* MNASNet B1, A1 (Squeeze-Excite), and Small (https://arxiv.org/abs/1807.11626)
* MobileNet-V3 (https://arxiv.org/abs/1905.02244)
* FBNet-C (https://arxiv.org/abs/1812.03443)
* Single-Path NAS (https://arxiv.org/abs/1904.02877)
I originally implemented and trained some of these models with code [here](https://github.com/rwightman/pytorch-image-models); this repository contains just the GenEfficientNet models, validation, and associated ONNX/Caffe2 export code.
## Pretrained
I've managed to train several of the models to accuracies close to or above the originating papers and official impl. My training code is here: https://github.com/rwightman/pytorch-image-models
|Model | Prec@1 (Err) | Prec@5 (Err) | Param#(M) | MAdds(M) | Image Scaling | Resolution | Crop |
|---|---|---|---|---|---|---|---|
| efficientnet_b3 | 82.240 (17.760) | 96.116 (3.884) | 12.23 | TBD | bicubic | 320 | 1.0 |
| efficientnet_b3 | 82.076 (17.924) | 96.020 (3.980) | 12.23 | TBD | bicubic | 300 | 0.904 |
| mixnet_xl | 81.074 (18.926) | 95.282 (4.718) | 11.90 | TBD | bicubic | 256 | 1.0 |
| efficientnet_b2 | 80.612 (19.388) | 95.318 (4.682) | 9.1 | TBD | bicubic | 288 | 1.0 |
| mixnet_xl | 80.476 (19.524) | 94.936 (5.064) | 11.90 | TBD | bicubic | 224 | 0.875 |
| efficientnet_b2 | 80.288 (19.712) | 95.166 (4.834) | 9.1 | 1003 | bicubic | 260 | 0.890 |
| mixnet_l | 78.976 (21.024) | 94.184 (5.816) | 7.33 | TBD | bicubic | 224 | 0.875 |
| efficientnet_b1 | 78.692 (21.308) | 94.086 (5.914) | 7.8 | 694 | bicubic | 240 | 0.882 |
| efficientnet_es | 78.066 (21.934) | 93.926 (6.074) | 5.44 | TBD | bicubic | 224 | 0.875 |
| efficientnet_b0 | 77.698 (22.302) | 93.532 (6.468) | 5.3 | 390 | bicubic | 224 | 0.875 |
| mobilenetv2_120d | 77.294 (22.706) | 93.502 (6.498) | 5.8 | TBD | bicubic | 224 | 0.875 |
| mixnet_m | 77.256 (22.744) | 93.418 (6.582) | 5.01 | 353 | bicubic | 224 | 0.875 |
| mobilenetv2_140 | 76.524 (23.476) | 92.990 (7.010) | 6.1 | TBD | bicubic | 224 | 0.875 |
| mixnet_s | 75.988 (24.012) | 92.794 (7.206) | 4.13 | TBD | bicubic | 224 | 0.875 |
| mobilenetv3_large_100 | 75.766 (24.234) | 92.542 (7.458) | 5.5 | TBD | bicubic | 224 | 0.875 |
| mobilenetv3_rw | 75.634 (24.366) | 92.708 (7.292) | 5.5 | 219 | bicubic | 224 | 0.875 |
| efficientnet_lite0 | 75.472 (24.528) | 92.520 (7.480) | 4.65 | TBD | bicubic | 224 | 0.875 |
| mnasnet_a1 | 75.448 (24.552) | 92.604 (7.396) | 3.9 | 312 | bicubic | 224 | 0.875 |
| fbnetc_100 | 75.124 (24.876) | 92.386 (7.614) | 5.6 | 385 | bilinear | 224 | 0.875 |
| mobilenetv2_110d | 75.052 (24.948) | 92.180 (7.820) | 4.5 | TBD | bicubic | 224 | 0.875 |
| mnasnet_b1 | 74.658 (25.342) | 92.114 (7.886) | 4.4 | 315 | bicubic | 224 | 0.875 |
| spnasnet_100 | 74.084 (25.916) | 91.818 (8.182) | 4.4 | TBD | bilinear | 224 | 0.875 |
| mobilenetv2_100 | 72.978 (27.022) | 91.016 (8.984) | 3.5 | TBD | bicubic | 224 | 0.875 |
More pretrained models to come...
## Ported Weights
The weights ported from Tensorflow checkpoints for the EfficientNet models closely match their Tensorflow accuracy once a SAME convolution padding equivalent is added and the same crop factors, image scaling, etc. (see table) are used via command line args.
**IMPORTANT:**
* Tensorflow ported weights for EfficientNet AdvProp (AP), EfficientNet EdgeTPU, EfficientNet-CondConv, EfficientNet-Lite, and MobileNet-V3 models use Inception style (0.5, 0.5, 0.5) for mean and std.
* Enabling the Tensorflow preprocessing pipeline with `--tf-preprocessing` at validation time will improve scores by 0.1-0.5%, very close to original TF impl.
To run validation for tf_efficientnet_b5:
`python validate.py /path/to/imagenet/validation/ --model tf_efficientnet_b5 -b 64 --img-size 456 --crop-pct 0.934 --interpolation bicubic`
To run validation w/ TF preprocessing for tf_efficientnet_b5:
`python validate.py /path/to/imagenet/validation/ --model tf_efficientnet_b5 -b 64 --img-size 456 --tf-preprocessing`
To run validation for a model with Inception preprocessing, i.e. EfficientNet-B8 AdvProp:
`python validate.py /path/to/imagenet/validation/ --model tf_efficientnet_b8_ap -b 48 --num-gpu 2 --img-size 672 --crop-pct 0.954 --mean 0.5 --std 0.5`
|Model | Prec@1 (Err) | Prec@5 (Err) | Param # | Image Scaling | Image Size | Crop |
|---|---|---|---|---|---|---|
| tf_efficientnet_l2_ns *tfp | 88.352 (11.648) | 98.652 (1.348) | 480 | bicubic | 800 | N/A |
| tf_efficientnet_l2_ns | TBD | TBD | 480 | bicubic | 800 | 0.961 |
| tf_efficientnet_l2_ns_475 | 88.234 (11.766) | 98.546 (1.454) | 480 | bicubic | 475 | 0.936 |
| tf_efficientnet_l2_ns_475 *tfp | 88.172 (11.828) | 98.566 (1.434) | 480 | bicubic | 475 | N/A |
| tf_efficientnet_b7_ns *tfp | 86.844 (13.156) | 98.084 (1.916) | 66.35 | bicubic | 600 | N/A |
| tf_efficientnet_b7_ns | 86.840 (13.160) | 98.094 (1.906) | 66.35 | bicubic | 600 | N/A |
| tf_efficientnet_b6_ns | 86.452 (13.548) | 97.882 (2.118) | 43.04 | bicubic | 528 | N/A |
| tf_efficientnet_b6_ns *tfp | 86.444 (13.556) | 97.880 (2.120) | 43.04 | bicubic | 528 | N/A |
| tf_efficientnet_b5_ns *tfp | 86.064 (13.936) | 97.746 (2.254) | 30.39 | bicubic | 456 | N/A |
| tf_efficientnet_b5_ns | 86.088 (13.912) | 97.752 (2.248) | 30.39 | bicubic | 456 | N/A |
| tf_efficientnet_b8_ap *tfp | 85.436 (14.564) | 97.272 (2.728) | 87.4 | bicubic | 672 | N/A |
| tf_efficientnet_b8 *tfp | 85.384 (14.616) | 97.394 (2.606) | 87.4 | bicubic | 672 | N/A |
| tf_efficientnet_b8 | 85.370 (14.630) | 97.390 (2.610) | 87.4 | bicubic | 672 | 0.954 |
| tf_efficientnet_b8_ap | 85.368 (14.632) | 97.294 (2.706) | 87.4 | bicubic | 672 | 0.954 |
| tf_efficientnet_b4_ns *tfp | 85.298 (14.702) | 97.504 (2.496) | 19.34 | bicubic | 380 | N/A |
| tf_efficientnet_b4_ns | 85.162 (14.838) | 97.470 (2.530) | 19.34 | bicubic | 380 | 0.922 |
| tf_efficientnet_b7_ap *tfp | 85.154 (14.846) | 97.244 (2.756) | 66.35 | bicubic | 600 | N/A |
| tf_efficientnet_b7_ap | 85.118 (14.882) | 97.252 (2.748) | 66.35 | bicubic | 600 | 0.949 |
| tf_efficientnet_b7 *tfp | 84.940 (15.060) | 97.214 (2.786) | 66.35 | bicubic | 600 | N/A |
| tf_efficientnet_b7 | 84.932 (15.068) | 97.208 (2.792) | 66.35 | bicubic | 600 | 0.949 |
| tf_efficientnet_b6_ap | 84.786 (15.214) | 97.138 (2.862) | 43.04 | bicubic | 528 | 0.942 |
| tf_efficientnet_b6_ap *tfp | 84.760 (15.240) | 97.124 (2.876) | 43.04 | bicubic | 528 | N/A |
| tf_efficientnet_b5_ap *tfp | 84.276 (15.724) | 96.932 (3.068) | 30.39 | bicubic | 456 | N/A |
| tf_efficientnet_b5_ap | 84.254 (15.746) | 96.976 (3.024) | 30.39 | bicubic | 456 | 0.934 |
| tf_efficientnet_b6 *tfp | 84.140 (15.860) | 96.852 (3.148) | 43.04 | bicubic | 528 | N/A |
| tf_efficientnet_b6 | 84.110 (15.890) | 96.886 (3.114) | 43.04 | bicubic | 528 | 0.942 |
| tf_efficientnet_b3_ns *tfp | 84.054 (15.946) | 96.918 (3.082) | 12.23 | bicubic | 300 | N/A |
| tf_efficientnet_b3_ns | 84.048 (15.952) | 96.910 (3.090) | 12.23 | bicubic | 300 | 0.904 |
| tf_efficientnet_b5 *tfp | 83.822 (16.178) | 96.756 (3.244) | 30.39 | bicubic | 456 | N/A |
| tf_efficientnet_b5 | 83.812 (16.188) | 96.748 (3.252) | 30.39 | bicubic | 456 | 0.934 |
| tf_efficientnet_b4_ap *tfp | 83.278 (16.722) | 96.376 (3.624) | 19.34 | bicubic | 380 | N/A |
| tf_efficientnet_b4_ap | 83.248 (16.752) | 96.388 (3.612) | 19.34 | bicubic | 380 | 0.922 |
| tf_efficientnet_b4 | 83.022 (16.978) | 96.300 (3.700) | 19.34 | bicubic | 380 | 0.922 |
| tf_efficientnet_b4 *tfp | 82.948 (17.052) | 96.308 (3.692) | 19.34 | bicubic | 380 | N/A |
| tf_efficientnet_b2_ns *tfp | 82.436 (17.564) | 96.268 (3.732) | 9.11 | bicubic | 260 | N/A |
| tf_efficientnet_b2_ns | 82.380 (17.620) | 96.248 (3.752) | 9.11 | bicubic | 260 | 0.89 |
| tf_efficientnet_b3_ap *tfp | 81.882 (18.118) | 95.662 (4.338) | 12.23 | bicubic | 300 | N/A |
| tf_efficientnet_b3_ap | 81.828 (18.172) | 95.624 (4.376) | 12.23 | bicubic | 300 | 0.904 |
| tf_efficientnet_b3 | 81.636 (18.364) | 95.718 (4.282) | 12.23 | bicubic | 300 | 0.904 |
| tf_efficientnet_b3 *tfp | 81.576 (18.424) | 95.662 (4.338) | 12.23 | bicubic | 300 | N/A |
| tf_efficientnet_lite4 | 81.528 (18.472) | 95.668 (4.332) | 13.00 | bilinear | 380 | 0.92 |
| tf_efficientnet_b1_ns *tfp | 81.514 (18.486) | 95.776 (4.224) | 7.79 | bicubic | 240 | N/A |
| tf_efficientnet_lite4 *tfp | 81.502 (18.498) | 95.676 (4.324) | 13.00 | bilinear | 380 | N/A |
| tf_efficientnet_b1_ns | 81.388 (18.612) | 95.738 (4.262) | 7.79 | bicubic | 240 | 0.88 |
| tf_efficientnet_el | 80.534 (19.466) | 95.190 (4.810) | 10.59 | bicubic | 300 | 0.904 |
| tf_efficientnet_el *tfp | 80.476 (19.524) | 95.200 (4.800) | 10.59 | bicubic | 300 | N/A |
| tf_efficientnet_b2_ap *tfp | 80.420 (19.580) | 95.040 (4.960) | 9.11 | bicubic | 260 | N/A |
| tf_efficientnet_b2_ap | 80.306 (19.694) | 95.028 (4.972) | 9.11 | bicubic | 260 | 0.890 |
| tf_efficientnet_b2 *tfp | 80.188 (19.812) | 94.974 (5.026) | 9.11 | bicubic | 260 | N/A |
| tf_efficientnet_b2 | 80.086 (19.914) | 94.908 (5.092) | 9.11 | bicubic | 260 | 0.890 |
| tf_efficientnet_lite3 | 79.812 (20.188) | 94.914 (5.086) | 8.20 | bilinear | 300 | 0.904 |
| tf_efficientnet_lite3 *tfp | 79.734 (20.266) | 94.838 (5.162) | 8.20 | bilinear | 300 | N/A |
| tf_efficientnet_b1_ap *tfp | 79.532 (20.468) | 94.378 (5.622) | 7.79 | bicubic | 240 | N/A |
| tf_efficientnet_cc_b1_8e *tfp | 79.464 (20.536) | 94.492 (5.508) | 39.7 | bicubic | 240 | 0.88 |
| tf_efficientnet_cc_b1_8e | 79.298 (20.702) | 94.364 (5.636) | 39.7 | bicubic | 240 | 0.88 |
| tf_efficientnet_b1_ap | 79.278 (20.722) | 94.308 (5.692) | 7.79 | bicubic | 240 | 0.88 |
| tf_efficientnet_b1 *tfp | 79.172 (20.828) | 94.450 (5.550) | 7.79 | bicubic | 240 | N/A |
| tf_efficientnet_em *tfp | 78.958 (21.042) | 94.458 (5.542) | 6.90 | bicubic | 240 | N/A |
| tf_efficientnet_b0_ns *tfp | 78.806 (21.194) | 94.496 (5.504) | 5.29 | bicubic | 224 | N/A |
| tf_mixnet_l *tfp | 78.846 (21.154) | 94.212 (5.788) | 7.33 | bilinear | 224 | N/A |
| tf_efficientnet_b1 | 78.826 (21.174) | 94.198 (5.802) | 7.79 | bicubic | 240 | 0.88 |
| tf_mixnet_l | 78.770 (21.230) | 94.004 (5.996) | 7.33 | bicubic | 224 | 0.875 |
| tf_efficientnet_em | 78.742 (21.258) | 94.332 (5.668) | 6.90 | bicubic | 240 | 0.875 |
| tf_efficientnet_b0_ns | 78.658 (21.342) | 94.376 (5.624) | 5.29 | bicubic | 224 | 0.875 |
| tf_efficientnet_cc_b0_8e *tfp | 78.314 (21.686) | 93.790 (6.210) | 24.0 | bicubic | 224 | 0.875 |
| tf_efficientnet_cc_b0_8e | 77.908 (22.092) | 93.656 (6.344) | 24.0 | bicubic | 224 | 0.875 |
| tf_efficientnet_cc_b0_4e *tfp | 77.746 (22.254) | 93.552 (6.448) | 13.3 | bicubic | 224 | 0.875 |
| tf_efficientnet_cc_b0_4e | 77.304 (22.696) | 93.332 (6.668) | 13.3 | bicubic | 224 | 0.875 |
| tf_efficientnet_es *tfp | 77.616 (22.384) | 93.750 (6.250) | 5.44 | bicubic | 224 | N/A |
| tf_efficientnet_lite2 *tfp | 77.544 (22.456) | 93.800 (6.200) | 6.09 | bilinear | 260 | N/A |
| tf_efficientnet_lite2 | 77.460 (22.540) | 93.746 (6.254) | 6.09 | bicubic | 260 | 0.89 |
| tf_efficientnet_b0_ap *tfp | 77.514 (22.486) | 93.576 (6.424) | 5.29 | bicubic | 224 | N/A |
| tf_efficientnet_es | 77.264 (22.736) | 93.600 (6.400) | 5.44 | bicubic | 224 | N/A |
| tf_efficientnet_b0 *tfp | 77.258 (22.742) | 93.478 (6.522) | 5.29 | bicubic | 224 | N/A |
| tf_efficientnet_b0_ap | 77.084 (22.916) | 93.254 (6.746) | 5.29 | bicubic | 224 | 0.875 |
| tf_mixnet_m *tfp | 77.072 (22.928) | 93.368 (6.632) | 5.01 | bilinear | 224 | N/A |
| tf_mixnet_m | 76.950 (23.050) | 93.156 (6.844) | 5.01 | bicubic | 224 | 0.875 |
| tf_efficientnet_b0 | 76.848 (23.152) | 93.228 (6.772) | 5.29 | bicubic | 224 | 0.875 |
| tf_efficientnet_lite1 *tfp | 76.764 (23.236) | 93.326 (6.674) | 5.42 | bilinear | 240 | N/A |
| tf_efficientnet_lite1 | 76.638 (23.362) | 93.232 (6.768) | 5.42 | bicubic | 240 | 0.882 |
| tf_mixnet_s *tfp | 75.800 (24.200) | 92.788 (7.212) | 4.13 | bilinear | 224 | N/A |
| tf_mobilenetv3_large_100 *tfp | 75.768 (24.232) | 92.710 (7.290) | 5.48 | bilinear | 224 | N/A |
| tf_mixnet_s | 75.648 (24.352) | 92.636 (7.364) | 4.13 | bicubic | 224 | 0.875 |
| tf_mobilenetv3_large_100 | 75.516 (24.484) | 92.600 (7.400) | 5.48 | bilinear | 224 | 0.875 |
| tf_efficientnet_lite0 *tfp | 75.074 (24.926) | 92.314 (7.686) | 4.65 | bilinear | 224 | N/A |
| tf_efficientnet_lite0 | 74.842 (25.158) | 92.170 (7.830) | 4.65 | bicubic | 224 | 0.875 |
| tf_mobilenetv3_large_075 *tfp | 73.730 (26.270) | 91.616 (8.384) | 3.99 | bilinear | 224 | N/A |
| tf_mobilenetv3_large_075 | 73.442 (26.558) | 91.352 (8.648) | 3.99 | bilinear | 224 | 0.875 |
| tf_mobilenetv3_large_minimal_100 *tfp | 72.678 (27.322) | 90.860 (9.140) | 3.92 | bilinear | 224 | N/A |
| tf_mobilenetv3_large_minimal_100 | 72.244 (27.756) | 90.636 (9.364) | 3.92 | bilinear | 224 | 0.875 |
| tf_mobilenetv3_small_100 *tfp | 67.918 (32.082) | 87.958 (12.042) | 2.54 | bilinear | 224 | N/A |
| tf_mobilenetv3_small_100 | 67.918 (32.082) | 87.662 (12.338) | 2.54 | bilinear | 224 | 0.875 |
| tf_mobilenetv3_small_075 *tfp | 66.142 (33.858) | 86.498 (13.502) | 2.04 | bilinear | 224 | N/A |
| tf_mobilenetv3_small_075 | 65.718 (34.282) | 86.136 (13.864) | 2.04 | bilinear | 224 | 0.875 |
| tf_mobilenetv3_small_minimal_100 *tfp | 63.378 (36.622) | 84.802 (15.198) | 2.04 | bilinear | 224 | N/A |
| tf_mobilenetv3_small_minimal_100 | 62.898 (37.102) | 84.230 (15.770) | 2.04 | bilinear | 224 | 0.875 |
*tfp models validated with `tf-preprocessing` pipeline
Google tf and tflite weights ported from official Tensorflow repositories
* https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
* https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
* https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet
## Usage
### Environment
All development and testing has been done in Conda Python 3 environments on Linux x86-64 systems, specifically Python 3.6.x, 3.7.x, 3.8.x.
Users have reported that a Python 3 Anaconda install in Windows works. I have not verified this myself.
PyTorch versions 1.4, 1.5, 1.6 have been tested with this code.
I've tried to keep the dependencies minimal; the setup follows the default PyTorch Conda install instructions:
```
conda create -n torch-env
conda activate torch-env
conda install -c pytorch pytorch torchvision cudatoolkit=10.2
```
### PyTorch Hub
Models can be accessed via the PyTorch Hub API
```
>>> torch.hub.list('rwightman/gen-efficientnet-pytorch')
['efficientnet_b0', ...]
>>> model = torch.hub.load('rwightman/gen-efficientnet-pytorch', 'efficientnet_b0', pretrained=True)
>>> model.eval()
>>> output = model(torch.randn(1,3,224,224))
```
### Pip
This package can be installed via pip.
Install (after conda env/install):
```
pip install geffnet
```
Eval use:
```
>>> import geffnet
>>> m = geffnet.create_model('mobilenetv3_large_100', pretrained=True)
>>> m.eval()
```
Train use:
```
>>> import geffnet
>>> # models can also be created by using the entrypoint directly
>>> m = geffnet.efficientnet_b2(pretrained=True, drop_rate=0.25, drop_connect_rate=0.2)
>>> m.train()
```
Create in a nn.Sequential container, for fast.ai, etc:
```
>>> import geffnet
>>> m = geffnet.mixnet_l(pretrained=True, drop_rate=0.25, drop_connect_rate=0.2, as_sequential=True)
```
### Exporting
Scripts are included to
* export models to ONNX (`onnx_export.py`)
* optimize an ONNX graph (`onnx_optimize.py` or `onnx_validate.py` w/ `--onnx-output-opt` arg)
* validate with ONNX runtime (`onnx_validate.py`)
* convert ONNX model to Caffe2 (`onnx_to_caffe.py`)
* validate in Caffe2 (`caffe2_validate.py`)
* benchmark in Caffe2 w/ FLOPs, parameters output (`caffe2_benchmark.py`)
As an example, to export the MobileNet-V3 pretrained model and then run an Imagenet validation:
```
python onnx_export.py --model mobilenetv3_large_100 ./mobilenetv3_100.onnx
python onnx_validate.py /imagenet/validation/ --onnx-input ./mobilenetv3_100.onnx
```
These scripts were tested to be working as of PyTorch 1.6 and ONNX 1.7 w/ ONNX runtime 1.4. Caffe2-compatible
export now requires additional args mentioned in the export script (not needed in earlier versions).
#### Export Notes
1. The TF ported weights with the 'SAME' conv padding activated cannot be exported to ONNX unless the `_EXPORTABLE` flag in `config.py` is set to True. Use `config.set_exportable(True)` as in the `onnx_export.py` script.
2. TF ported models with 'SAME' padding will have the padding fixed at export time to the resolution used for export. Even though dynamic padding is supported in opset >= 11, I can't get it working.
3. The ONNX optimize facility doesn't work reliably in PyTorch 1.6 / ONNX 1.7. Fortunately, onnxruntime-based inference now works very well and includes on-the-fly optimization.
4. ONNX / Caffe2 export/import frequently breaks with different PyTorch and ONNX version releases. Please check their respective issue trackers before filing issues here.
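For reference, a minimal export sketch along the lines of note 1 (illustrative only; the exact arguments used by `onnx_export.py` may differ, and the model name is just an example):
```
>>> import torch, geffnet
>>> geffnet.set_exportable(True)  # required for TF-ported 'SAME'-padded models
>>> model = geffnet.create_model('tf_efficientnet_b0', pretrained=True)
>>> model.eval()
>>> torch.onnx.export(model, torch.randn(1, 3, 224, 224), 'tf_efficientnet_b0.onnx')
```
The exported graph can then be optimized and validated with the ONNX scripts listed above.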

View File

@@ -1,65 +0,0 @@
""" Caffe2 validation script
This script runs Caffe2 benchmark on exported ONNX model.
It is a useful tool for reporting model FLOPS.
Copyright 2020 Ross Wightman
"""
import argparse
from caffe2.python import core, workspace, model_helper
from caffe2.proto import caffe2_pb2
parser = argparse.ArgumentParser(description='Caffe2 Model Benchmark')
parser.add_argument('--c2-prefix', default='', type=str, metavar='NAME',
help='caffe2 model pb name prefix')
parser.add_argument('--c2-init', default='', type=str, metavar='PATH',
help='caffe2 model init .pb')
parser.add_argument('--c2-predict', default='', type=str, metavar='PATH',
help='caffe2 model predict .pb')
parser.add_argument('-b', '--batch-size', default=1, type=int,
metavar='N', help='mini-batch size (default: 1)')
parser.add_argument('--img-size', default=224, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
def main():
args = parser.parse_args()
args.gpu_id = 0
if args.c2_prefix:
args.c2_init = args.c2_prefix + '.init.pb'
args.c2_predict = args.c2_prefix + '.predict.pb'
model = model_helper.ModelHelper(name="le_net", init_params=False)
# Bring in the init net from init_net.pb
init_net_proto = caffe2_pb2.NetDef()
with open(args.c2_init, "rb") as f:
init_net_proto.ParseFromString(f.read())
model.param_init_net = core.Net(init_net_proto)
# bring in the predict net from predict_net.pb
predict_net_proto = caffe2_pb2.NetDef()
with open(args.c2_predict, "rb") as f:
predict_net_proto.ParseFromString(f.read())
model.net = core.Net(predict_net_proto)
# CUDA performance not impressive
#device_opts = core.DeviceOption(caffe2_pb2.PROTO_CUDA, args.gpu_id)
#model.net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True)
#model.param_init_net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True)
input_blob = model.net.external_inputs[0]
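# Fill the input blob with random data via the init net; the benchmark only measures runtime, so real images are not needed.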
model.param_init_net.GaussianFill(
[],
input_blob.GetUnscopedName(),
shape=(args.batch_size, 3, args.img_size, args.img_size),
mean=0.0,
std=1.0)
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, overwrite=True)
workspace.BenchmarkNet(model.net.Proto().name, 5, 20, True)
if __name__ == '__main__':
main()

View File

@@ -1,138 +0,0 @@
""" Caffe2 validation script
This script is created to verify exported ONNX models running in Caffe2.
It utilizes the same PyTorch dataloader/processing pipeline for a
fair comparison against the originals.
Copyright 2020 Ross Wightman
"""
import argparse
import numpy as np
from caffe2.python import core, workspace, model_helper
from caffe2.proto import caffe2_pb2
from data import create_loader, resolve_data_config, Dataset
from utils import AverageMeter
import time
parser = argparse.ArgumentParser(description='Caffe2 ImageNet Validation')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--c2-prefix', default='', type=str, metavar='NAME',
help='caffe2 model pb name prefix')
parser.add_argument('--c2-init', default='', type=str, metavar='PATH',
help='caffe2 model init .pb')
parser.add_argument('--c2-predict', default='', type=str, metavar='PATH',
help='caffe2 model predict .pb')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of dataset')
parser.add_argument('--crop-pct', type=float, default=None, metavar='PCT',
help='Override default crop pct of 0.875')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--tf-preprocessing', dest='tf_preprocessing', action='store_true',
help='use tensorflow mnasnet preprocessing')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
def main():
args = parser.parse_args()
args.gpu_id = 0
if args.c2_prefix:
args.c2_init = args.c2_prefix + '.init.pb'
args.c2_predict = args.c2_prefix + '.predict.pb'
model = model_helper.ModelHelper(name="validation_net", init_params=False)
# Bring in the init net from init_net.pb
init_net_proto = caffe2_pb2.NetDef()
with open(args.c2_init, "rb") as f:
init_net_proto.ParseFromString(f.read())
model.param_init_net = core.Net(init_net_proto)
# bring in the predict net from predict_net.pb
predict_net_proto = caffe2_pb2.NetDef()
with open(args.c2_predict, "rb") as f:
predict_net_proto.ParseFromString(f.read())
model.net = core.Net(predict_net_proto)
data_config = resolve_data_config(None, args)
loader = create_loader(
Dataset(args.data, load_bytes=args.tf_preprocessing),
input_size=data_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=False,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=data_config['crop_pct'],
tensorflow_preprocessing=args.tf_preprocessing)
# this is so obvious, wonderful interface </sarcasm>
input_blob = model.net.external_inputs[0]
output_blob = model.net.external_outputs[0]
if True:
device_opts = None
else:
# CUDA is crashing, no idea why, awesome error message, give it a try for kicks
device_opts = core.DeviceOption(caffe2_pb2.PROTO_CUDA, args.gpu_id)
model.net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True)
model.param_init_net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True)
model.param_init_net.GaussianFill(
[], input_blob.GetUnscopedName(),
shape=(1,) + data_config['input_size'], mean=0.0, std=1.0)
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, overwrite=True)
batch_time = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
for i, (input, target) in enumerate(loader):
# run the net and return prediction
caffe2_in = input.data.numpy()
workspace.FeedBlob(input_blob, caffe2_in, device_opts)
workspace.RunNet(model.net, num_iter=1)
output = workspace.FetchBlob(output_blob)
# measure accuracy and record loss
prec1, prec5 = accuracy_np(output.data, target.numpy())
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f}, {rate_avg:.3f}/s, {ms_avg:.3f} ms/sample) \t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(loader), batch_time=batch_time, rate_avg=input.size(0) / batch_time.avg,
ms_avg=1000 * batch_time.avg / input.size(0), top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format(
top1=top1, top1a=100-top1.avg, top5=top5, top5a=100.-top5.avg))
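# Simple numpy top-1/top-5 accuracy helper: output is an (N, num_classes) array of scores,
# target an (N,) array of int class labels; returns percentages over the batch.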
def accuracy_np(output, target):
max_indices = np.argsort(output, axis=1)[:, ::-1]
top5 = 100 * np.equal(max_indices[:, :5], target[:, np.newaxis]).sum(axis=1).mean()
top1 = 100 * np.equal(max_indices[:, 0], target).mean()
return top1, top5
if __name__ == '__main__':
main()

View File

@@ -1,5 +0,0 @@
from .gen_efficientnet import *
from .mobilenetv3 import *
from .model_factory import create_model
from .config import is_exportable, is_scriptable, set_exportable, set_scriptable
from .activations import *

View File

@@ -1,137 +0,0 @@
from geffnet import config
from geffnet.activations.activations_me import *
from geffnet.activations.activations_jit import *
from geffnet.activations.activations import *
import torch
_has_silu = 'silu' in dir(torch.nn.functional)
_ACT_FN_DEFAULT = dict(
silu=F.silu if _has_silu else swish,
swish=F.silu if _has_silu else swish,
mish=mish,
relu=F.relu,
relu6=F.relu6,
sigmoid=sigmoid,
tanh=tanh,
hard_sigmoid=hard_sigmoid,
hard_swish=hard_swish,
)
_ACT_FN_JIT = dict(
silu=F.silu if _has_silu else swish_jit,
swish=F.silu if _has_silu else swish_jit,
mish=mish_jit,
)
_ACT_FN_ME = dict(
silu=F.silu if _has_silu else swish_me,
swish=F.silu if _has_silu else swish_me,
mish=mish_me,
hard_swish=hard_swish_me,
hard_sigmoid=hard_sigmoid_me,
)
_ACT_LAYER_DEFAULT = dict(
silu=nn.SiLU if _has_silu else Swish,
swish=nn.SiLU if _has_silu else Swish,
mish=Mish,
relu=nn.ReLU,
relu6=nn.ReLU6,
sigmoid=Sigmoid,
tanh=Tanh,
hard_sigmoid=HardSigmoid,
hard_swish=HardSwish,
)
_ACT_LAYER_JIT = dict(
silu=nn.SiLU if _has_silu else SwishJit,
swish=nn.SiLU if _has_silu else SwishJit,
mish=MishJit,
)
_ACT_LAYER_ME = dict(
silu=nn.SiLU if _has_silu else SwishMe,
swish=nn.SiLU if _has_silu else SwishMe,
mish=MishMe,
hard_swish=HardSwishMe,
hard_sigmoid=HardSigmoidMe
)
_OVERRIDE_FN = dict()
_OVERRIDE_LAYER = dict()
def add_override_act_fn(name, fn):
global _OVERRIDE_FN
_OVERRIDE_FN[name] = fn
def update_override_act_fn(overrides):
assert isinstance(overrides, dict)
global _OVERRIDE_FN
_OVERRIDE_FN.update(overrides)
def clear_override_act_fn():
global _OVERRIDE_FN
_OVERRIDE_FN = dict()
def add_override_act_layer(name, fn):
_OVERRIDE_LAYER[name] = fn
def update_override_act_layer(overrides):
assert isinstance(overrides, dict)
global _OVERRIDE_LAYER
_OVERRIDE_LAYER.update(overrides)
def clear_override_act_layer():
global _OVERRIDE_LAYER
_OVERRIDE_LAYER = dict()
def get_act_fn(name='relu'):
""" Activation Function Factory
Fetching activation fns by name with this function allows export or torch script friendly
functions to be returned dynamically based on current config.
"""
if name in _OVERRIDE_FN:
return _OVERRIDE_FN[name]
use_me = not (config.is_exportable() or config.is_scriptable() or config.is_no_jit())
if use_me and name in _ACT_FN_ME:
# If not exporting or scripting the model, first look for a memory-optimized version of the
# activation with custom autograd, then fall back to jit scripted, then a Python or Torch builtin
return _ACT_FN_ME[name]
if config.is_exportable() and name in ('silu', 'swish'):
# FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack
return swish
use_jit = not (config.is_exportable() or config.is_no_jit())
# NOTE: export tracing should work with jit scripted components, but I keep running into issues
if use_jit and name in _ACT_FN_JIT: # jit scripted models should be okay for export/scripting
return _ACT_FN_JIT[name]
return _ACT_FN_DEFAULT[name]
def get_act_layer(name='relu'):
""" Activation Layer Factory
Fetching activation layers by name with this function allows export or torch script friendly
functions to be returned dynamically based on current config.
"""
if name in _OVERRIDE_LAYER:
return _OVERRIDE_LAYER[name]
use_me = not (config.is_exportable() or config.is_scriptable() or config.is_no_jit())
if use_me and name in _ACT_LAYER_ME:
return _ACT_LAYER_ME[name]
if config.is_exportable() and name in ('silu', 'swish'):
# FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack
return Swish
use_jit = not (config.is_exportable() or config.is_no_jit())
# NOTE: export tracing should work with jit scripted components, but I keep running into issues
if use_jit and name in _ACT_LAYER_JIT: # jit scripted models should be okay for export/scripting
return _ACT_LAYER_JIT[name]
return _ACT_LAYER_DEFAULT[name]
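# Example usage (illustrative, not part of the original module): the config flags change
# which implementation the factory returns, e.g.
#   from geffnet import config
#   from geffnet.activations import get_act_layer  # assuming this module's import path
#   config.set_exportable(True)
#   act_layer = get_act_layer('swish')  # returns the export-safe Swish module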

View File

@@ -1,102 +0,0 @@
""" Activations
A collection of activations fn and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
Copyright 2020 Ross Wightman
"""
from torch import nn as nn
from torch.nn import functional as F
def swish(x, inplace: bool = False):
"""Swish - Described originally as SiLU (https://arxiv.org/abs/1702.03118v3)
and also as Swish (https://arxiv.org/abs/1710.05941).
TODO Rename to SiLU with addition to PyTorch
"""
return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())
class Swish(nn.Module):
def __init__(self, inplace: bool = False):
super(Swish, self).__init__()
self.inplace = inplace
def forward(self, x):
return swish(x, self.inplace)
def mish(x, inplace: bool = False):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
"""
return x.mul(F.softplus(x).tanh())
class Mish(nn.Module):
def __init__(self, inplace: bool = False):
super(Mish, self).__init__()
self.inplace = inplace
def forward(self, x):
return mish(x, self.inplace)
def sigmoid(x, inplace: bool = False):
return x.sigmoid_() if inplace else x.sigmoid()
# PyTorch has this, but not with a consistent inplace argument interface
class Sigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(Sigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.sigmoid_() if self.inplace else x.sigmoid()
def tanh(x, inplace: bool = False):
return x.tanh_() if inplace else x.tanh()
# PyTorch has this, but not with a consistent inplace argument interface
class Tanh(nn.Module):
def __init__(self, inplace: bool = False):
super(Tanh, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.tanh_() if self.inplace else x.tanh()
def hard_swish(x, inplace: bool = False):
inner = F.relu6(x + 3.).div_(6.)
return x.mul_(inner) if inplace else x.mul(inner)
class HardSwish(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSwish, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_swish(x, self.inplace)
def hard_sigmoid(x, inplace: bool = False):
if inplace:
return x.add_(3.).clamp_(0., 6.).div_(6.)
else:
return F.relu6(x + 3.) / 6.
class HardSigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_sigmoid(x, self.inplace)

View File

@@ -1,79 +0,0 @@
""" Activations (jit)
A collection of jit-scripted activations fn and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not
currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted
versions if they contain in-place ops.
Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
__all__ = ['swish_jit', 'SwishJit', 'mish_jit', 'MishJit',
'hard_sigmoid_jit', 'HardSigmoidJit', 'hard_swish_jit', 'HardSwishJit']
@torch.jit.script
def swish_jit(x, inplace: bool = False):
"""Swish - Described originally as SiLU (https://arxiv.org/abs/1702.03118v3)
and also as Swish (https://arxiv.org/abs/1710.05941).
TODO Rename to SiLU with addition to PyTorch
"""
return x.mul(x.sigmoid())
@torch.jit.script
def mish_jit(x, _inplace: bool = False):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
"""
return x.mul(F.softplus(x).tanh())
class SwishJit(nn.Module):
def __init__(self, inplace: bool = False):
super(SwishJit, self).__init__()
def forward(self, x):
return swish_jit(x)
class MishJit(nn.Module):
def __init__(self, inplace: bool = False):
super(MishJit, self).__init__()
def forward(self, x):
return mish_jit(x)
@torch.jit.script
def hard_sigmoid_jit(x, inplace: bool = False):
# return F.relu6(x + 3.) / 6.
return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster?
class HardSigmoidJit(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSigmoidJit, self).__init__()
def forward(self, x):
return hard_sigmoid_jit(x)
@torch.jit.script
def hard_swish_jit(x, inplace: bool = False):
# return x * (F.relu6(x + 3.) / 6)
return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster?
class HardSwishJit(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSwishJit, self).__init__()
def forward(self, x):
return hard_swish_jit(x)
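A quick equivalence check for the scripted variants (a sketch assuming the functions above are in scope; the scripted forms should agree numerically with the eager formulas):

import torch
from torch.nn import functional as F

x = torch.randn(8)
assert torch.allclose(swish_jit(x), x * torch.sigmoid(x))
assert torch.allclose(hard_swish_jit(x), x * F.relu6(x + 3.) / 6.)

act = HardSwishJit()            # drop-in nn.Module, like any other activation
y = act(torch.randn(2, 16))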


@@ -1,174 +0,0 @@
""" Activations (memory-efficient w/ custom autograd)
A collection of activations fn and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
These activations are not compatible with jit scripting or ONNX export of the model, please use either
the JIT or basic versions of the activations.
Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
__all__ = ['swish_me', 'SwishMe', 'mish_me', 'MishMe',
'hard_sigmoid_me', 'HardSigmoidMe', 'hard_swish_me', 'HardSwishMe']
@torch.jit.script
def swish_jit_fwd(x):
return x.mul(torch.sigmoid(x))
@torch.jit.script
def swish_jit_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid)))
class SwishJitAutoFn(torch.autograd.Function):
""" torch.jit.script optimised Swish w/ memory-efficient checkpoint
Inspired by conversation btw Jeremy Howard & Adam Pazske
https://twitter.com/jeremyphoward/status/1188251041835315200
Swish - Described originally as SiLU (https://arxiv.org/abs/1702.03118v3)
and also as Swish (https://arxiv.org/abs/1710.05941).
TODO Rename to SiLU with addition to PyTorch
"""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return swish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return swish_jit_bwd(x, grad_output)
def swish_me(x, inplace=False):
return SwishJitAutoFn.apply(x)
class SwishMe(nn.Module):
def __init__(self, inplace: bool = False):
super(SwishMe, self).__init__()
def forward(self, x):
return SwishJitAutoFn.apply(x)
@torch.jit.script
def mish_jit_fwd(x):
return x.mul(torch.tanh(F.softplus(x)))
@torch.jit.script
def mish_jit_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
x_tanh_sp = F.softplus(x).tanh()
return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp))
class MishJitAutoFn(torch.autograd.Function):
""" Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
A memory efficient, jit scripted variant of Mish
"""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return mish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return mish_jit_bwd(x, grad_output)
def mish_me(x, inplace=False):
return MishJitAutoFn.apply(x)
class MishMe(nn.Module):
def __init__(self, inplace: bool = False):
super(MishMe, self).__init__()
def forward(self, x):
return MishJitAutoFn.apply(x)
@torch.jit.script
def hard_sigmoid_jit_fwd(x, inplace: bool = False):
return (x + 3).clamp(min=0, max=6).div(6.)
@torch.jit.script
def hard_sigmoid_jit_bwd(x, grad_output):
m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6.
return grad_output * m
class HardSigmoidJitAutoFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return hard_sigmoid_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return hard_sigmoid_jit_bwd(x, grad_output)
def hard_sigmoid_me(x, inplace: bool = False):
return HardSigmoidJitAutoFn.apply(x)
class HardSigmoidMe(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSigmoidMe, self).__init__()
def forward(self, x):
return HardSigmoidJitAutoFn.apply(x)
@torch.jit.script
def hard_swish_jit_fwd(x):
return x * (x + 3).clamp(min=0, max=6).div(6.)
@torch.jit.script
def hard_swish_jit_bwd(x, grad_output):
m = torch.ones_like(x) * (x >= 3.)
m = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, m)
return grad_output * m
class HardSwishJitAutoFn(torch.autograd.Function):
"""A memory efficient, jit-scripted HardSwish activation"""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return hard_swish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return hard_swish_jit_bwd(x, grad_output)
def hard_swish_me(x, inplace=False):
return HardSwishJitAutoFn.apply(x)
class HardSwishMe(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSwishMe, self).__init__()
def forward(self, x):
return HardSwishJitAutoFn.apply(x)
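Because these functions ship a hand-written backward (so intermediates need not be stored), it is worth checking the analytic gradient against PyTorch's numerical one. A minimal sketch, assuming SwishJitAutoFn from this file is in scope (gradcheck requires double precision):

import torch

x = torch.randn(6, dtype=torch.double, requires_grad=True)
# Compares the custom analytic backward to finite-difference gradients.
assert torch.autograd.gradcheck(SwishJitAutoFn.apply, (x,))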


@@ -1,123 +0,0 @@
""" Global layer config state
"""
from typing import Any, Optional
__all__ = [
'is_exportable', 'is_scriptable', 'is_no_jit', 'layer_config_kwargs',
'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config'
]
# Set to True if prefer to have layers with no jit optimization (includes activations)
_NO_JIT = False
# Set to True if prefer to have activation layers with no jit optimization
# NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying
# the jit flags so far are activations. This will change as more layers are updated and/or added.
_NO_ACTIVATION_JIT = False
# Set to True if exporting a model with Same padding via ONNX
_EXPORTABLE = False
# Set to True if wanting to use torch.jit.script on a model
_SCRIPTABLE = False
def is_no_jit():
return _NO_JIT
class set_no_jit:
def __init__(self, mode: bool) -> None:
global _NO_JIT
self.prev = _NO_JIT
_NO_JIT = mode
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _NO_JIT
_NO_JIT = self.prev
return False
def is_exportable():
return _EXPORTABLE
class set_exportable:
def __init__(self, mode: bool) -> None:
global _EXPORTABLE
self.prev = _EXPORTABLE
_EXPORTABLE = mode
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _EXPORTABLE
_EXPORTABLE = self.prev
return False
def is_scriptable():
return _SCRIPTABLE
class set_scriptable:
def __init__(self, mode: bool) -> None:
global _SCRIPTABLE
self.prev = _SCRIPTABLE
_SCRIPTABLE = mode
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _SCRIPTABLE
_SCRIPTABLE = self.prev
return False
class set_layer_config:
""" Layer config context manager that allows setting all layer config flags at once.
If a flag arg is None, it will not change the current value.
"""
def __init__(
self,
scriptable: Optional[bool] = None,
exportable: Optional[bool] = None,
no_jit: Optional[bool] = None,
no_activation_jit: Optional[bool] = None):
global _SCRIPTABLE
global _EXPORTABLE
global _NO_JIT
global _NO_ACTIVATION_JIT
self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT
if scriptable is not None:
_SCRIPTABLE = scriptable
if exportable is not None:
_EXPORTABLE = exportable
if no_jit is not None:
_NO_JIT = no_jit
if no_activation_jit is not None:
_NO_ACTIVATION_JIT = no_activation_jit
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _SCRIPTABLE
global _EXPORTABLE
global _NO_JIT
global _NO_ACTIVATION_JIT
_SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev
return False
def layer_config_kwargs(kwargs):
""" Consume config kwargs and return contextmgr obj """
return set_layer_config(
scriptable=kwargs.pop('scriptable', None),
exportable=kwargs.pop('exportable', None),
no_jit=kwargs.pop('no_jit', None))
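A brief illustration of how these flags are meant to be used (a sketch; it assumes the functions above are in scope and the globals are at their default False values; the kwargs dict is hypothetical):

# The flags are process-global; the context managers restore the previous values on exit.
with set_layer_config(scriptable=True, no_jit=True):
    assert is_scriptable() and is_no_jit()
assert not is_scriptable() and not is_no_jit()

# layer_config_kwargs pops the config flags out of a model-factory kwargs dict
# so they are not forwarded to the model constructor.
kwargs = {'exportable': True, 'num_classes': 10}
with layer_config_kwargs(kwargs):
    assert is_exportable()
assert kwargs == {'num_classes': 10}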

Some files were not shown because too many files have changed in this diff.