mirror of https://github.com/acon96/home-llm.git
handle voice assistant aliases as duplicate devices
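With this change, an exposed entity appears in the prompt's device list once under its friendly name and once more for each voice assistant alias, so the model can match either name to the same entity id. As a hypothetical illustration (the entity id, name, and alias are invented, not taken from this diff), a light with one alias would yield two device lines:

    light.kitchen 'Kitchen Light' = on
    light.kitchen 'overhead lamp' = on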
TODO.md
@@ -1,4 +1,12 @@
# TODO
- [ ] setup github actions to build wheels that are optimized for RPIs
- [ ] detection/mitigation of too many entities being exposed & blowing out the context length
- [ ] areas/room support
- [ ] figure out DPO for refusals + fixing incorrect entity id
- [ ] mixtral + prompting (no fine tuning)
    - add in context learning variables to sys prompt template
    - add new options to setup process for setting prompt style + picking fine-tuned/ICL
- [ ] prime kv cache with current "state" so that requests are faster
- [x] ChatML format (actually need to add special tokens)
- [x] Vicuna dataset merge (yahma/alpaca-cleaned)
- [x] Phi-2 fine tuning
@@ -11,7 +19,6 @@
- [ ] multi-turn prompts; better instruct dataset like dolphin/wizardlm?
- [x] Fine tune Phi-1.5 version
- [x] make llama-cpp-python wheels for "llama-cpp-python>=0.2.24"
- [ ] prime kv cache with current "state" so that requests are faster
- [x] make a proper evaluation framework to run. not just loss. should test accuracy on the function calling
- [x] add more remote backends
    - LocalAI (openai compatible)
@@ -19,15 +26,8 @@
    - support chat completions API (might fix Ollama + adds support for text-gen-ui characters)
- [x] more config options for prompt template (allow other than chatml)
- [x] publish snapshot of dataset on HF
- [ ] figure out DPO for refusals + fixing incorrect entity id
- [ ] mixtral + prompting (no fine tuning)
    - add in context learning variables to sys prompt template
    - add new options to setup process for setting prompt style + picking fine-tuned/ICL
- [x] use varied system prompts to add behaviors
- [ ] setup github actions to build wheels that are optimized for RPIs
- [ ] detection/mitigation of too many entities being exposed & blowing out the context length
- [ ] entity aliases
- [ ] areas/room support


## more complicated ideas
- [ ] "context requests"

@@ -18,7 +18,7 @@ from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST, CONF_PORT, CONF_SSL, MATCH_ALL
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady, ConfigEntryError, TemplateError
from homeassistant.helpers import config_validation as cv, intent, template
from homeassistant.helpers import config_validation as cv, intent, template, entity_registry as er
from homeassistant.util import ulid

from .utils import closest_color, flatten_vol_schema, install_llama_cpp_python
@@ -338,12 +338,18 @@ class LLaMAAgent(AbstractConversationAgent):
        """Gather exposed entity states"""
        entity_states = {}
        domains = set()
        entity_registry = er.async_get(self.hass)

        for state in self.hass.states.async_all():
            if not async_should_expose(self.hass, CONVERSATION_DOMAIN, state.entity_id):
                continue

            entity = entity_registry.async_get(state.entity_id)

            attributes = dict(state.attributes)
            attributes["state"] = state.state
            if entity and entity.aliases:
                attributes["aliases"] = entity.aliases
            entity_states[state.entity_id] = attributes
            domains.add(state.domain)

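For reference, a minimal sketch of what one gathered entry might look like once aliases are copied in. The entity id, friendly name, attribute values, and alias below are made-up examples, not values from this diff; in the component the dict comes from state.attributes plus the registry entry's aliases.

    # Hypothetical example: the shape of one entity_states entry after the loop
    # above runs for an entity that has voice assistant aliases configured.
    entity_states = {
        "light.kitchen": {
            "friendly_name": "Kitchen Light",   # from state.attributes
            "brightness": 128,                  # from state.attributes
            "state": "on",                      # merged in from state.state
            "aliases": ["overhead lamp"],       # copied from the registry entry, when present
        },
    }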
@@ -408,9 +414,15 @@ class LLaMAAgent(AbstractConversationAgent):
                    result = result + ";" + str(value)
            return result

        formatted_states = "\n".join(
            [f"{name} '{attributes.get('friendly_name')}' = {expose_attributes(attributes)}" for name, attributes in entities_to_expose.items()]
        ) + "\n"
        device_states = [f"{name} '{attributes.get('friendly_name')}' = {expose_attributes(attributes)}" for name, attributes in entities_to_expose.items()]

        # expose devices as their alias as well
        for name, attributes in entities_to_expose.items():
            if "aliases" in attributes:
                for alias in attributes["aliases"]:
                    device_states.append(f"{name} '{alias}' = {expose_attributes(attributes)}")

        formatted_states = "\n".join(device_states) + "\n"

        service_dict = self.hass.services.async_services()
        all_services = []

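To make the new behavior concrete, here is a small standalone sketch of the alias-duplication step using plain dicts; the entity ids, names, aliases, and the simplified expose_attributes helper are assumptions for illustration, not code from the repository.

    # Standalone sketch of the alias duplication above; example data only.
    entities_to_expose = {
        "light.kitchen": {"friendly_name": "Kitchen Light", "state": "on", "aliases": ["overhead lamp"]},
        "switch.fan": {"friendly_name": "Fan", "state": "off"},
    }

    def expose_attributes(attributes):
        # Simplified stand-in for the component's helper: just report the state.
        return attributes["state"]

    device_states = [f"{name} '{attributes.get('friendly_name')}' = {expose_attributes(attributes)}"
                     for name, attributes in entities_to_expose.items()]

    # expose devices as their alias as well
    for name, attributes in entities_to_expose.items():
        if "aliases" in attributes:
            for alias in attributes["aliases"]:
                device_states.append(f"{name} '{alias}' = {expose_attributes(attributes)}")

    print("\n".join(device_states))
    # light.kitchen 'Kitchen Light' = on
    # switch.fan 'Fan' = off
    # light.kitchen 'overhead lamp' = on

The aliased line keeps the same entity id and state, so the model sees the alias as just another name for the same device.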