Mirror of https://github.com/vacp2p/dst-prefect-workflows.git (synced 2026-01-07 20:43:51 -05:00)

Commit: Add LARS, really crappy first start
.gitignore (vendored) | 3 lines changed

@@ -1,3 +1,4 @@
prefect/10ksim/**
prefect/10ksim/
**/.env
**/ruby.yaml
**/target/**
README.md | 82 lines changed

@@ -1,2 +1,80 @@
# dst-argo-workflows
A holding repo for Argo Workflow files and related code

# prefect workflows for DST

Prefect is the deployment tool used to run workloads in the lab.

**dst-prefect-workflows** is a structured collection of Prefect workflows for DST.

## Purpose

Prefect is used to run deployments/simulations in the lab automatically, allowing you to run multiple deployments in parallel (or in sequence) with one command.

## Structure

- `base-manifests/` - old; used to grant Argo Workflows access so it could affect/update running workflows within zerotesting.
- `charts/` - Helm charts for installing and managing deployments of software for scale testing.
- `prefect/` - Prefect workflows for deploying and managing simulations. A simple, single `run.py` file.
- `scripts/` - scripts for managing the Prefect workflows.

## How to Use

1. **Install Dependencies:**

   Install the required Python packages, including Prefect. It's recommended to use a virtual environment.

   ```bash
   pip install -r requirements.txt
   # If you encounter system package issues, you might try:
   # pip install -U prefect --break-system-packages
   ```

2. **Configure Environment:**

   Create a `.env` file in the `prefect/` directory and add your GitHub Personal Access Token with repository access:

   ```dotenv
   GITHUB_TOKEN=ghp_YOUR_GITHUB_TOKEN
   ```

3. **Prepare GitHub Issue:**

   * Create a GitHub issue in the target repository.
   * Fill in the issue body with the simulation parameters according to the template expected by `run.py` (e.g., program type, node count, duration, Docker image, etc.). An issue template with a wizard will be provided for this.
   * Add the `needs-scheduling` label to the issue. Ensure this label is added by an authorized user (defined in `AUTHORIZED_USERS` within `run.py`).

4. **Run the Prefect Flow:**

   Execute the `run.py` script. This starts the Prefect flow, which scans the configured GitHub repository for issues labeled `needs-scheduling`.

   ```bash
   python run.py
   ```

   The flow will:

   * Find valid issues created by authorized users.
   * Parse the issue body to generate simulation configurations.
   * Deploy the simulations using Helm based on the configurations.
   * Clean up the simulations after they have been running for the configured duration.
   * (In the future) update the issue label to `simulation-done` upon completion.

5. **Collect Results:**

   Simulation results and logs might be stored in the `test/` directory or other locations, depending on the specific Helm chart and simulation setup.

6. **Post-Analysis:**

   The `run.py` script also generates a summary of the simulation results, saving graphs in the main folder and per-run results in the `test/` folder.
lars/Cargo.lock (generated, new file) | 3787 lines (diff suppressed because it is too large)
lars/Cargo.toml (new file) | 40 lines

@@ -0,0 +1,40 @@
[package]
name = "lars"
version = "0.1.0"
edition = "2021"

[dependencies]
axum = { version = "0.7", features = ["ws", "macros"] }
tokio = { version = "1", features = ["full"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
minijinja = { version = "2.9", features = ["loader", "json"] }
minijinja-autoreload = "2.9"
tower-http = { version = "0.6", features = ["fs", "trace", "cors"] }
tower-livereload = "0.9"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
kube = { version = "0.99", features = ["runtime", "derive", "client", "config"] }
k8s-openapi = { version = "0.24", features = ["v1_28"] }

prometheus-client = "0.22"
thiserror = "1.0"
uuid = { version = "1.7", features = ["v4", "serde"] }
chrono = { version = "0.4", features = ["serde"] }
sqlx = { version = "0.7", features = ["runtime-tokio", "sqlite", "macros", "chrono", "uuid", "migrate"] }
dotenvy = "0.15"

# Optional, but often useful for config management
# config = { version = "0.14", features = ["yaml", "json", "toml", "env"] }
# dotenvy = "0.15"

# Added for SSE stream building
futures = "0.3"
async-stream = "0.3"

# Added for mock data generation
rand = "0.8"

[dev-dependencies]
# Add development-specific dependencies here later if needed
# e.g., for integration testing
lars/DESIGN.md (new file) | 120 lines

@@ -0,0 +1,120 @@
# LARS - Design Document

This document outlines the proposed high-level design for LARS (Lab Automated Resource Scheduler).

## 1. Architecture Overview

LARS will be implemented as a standalone, long-running Rust application, likely deployed as a Kubernetes Deployment within the lab cluster.

It will consist of several key components:

1. **API Server:** Handles incoming HTTP requests (`/request_run`, `/simulation_complete`) from simulation orchestrators like `run.py`. (Likely using a Rust web framework like `axum` or `actix-web`.)
2. **Scheduler Core:** Contains the main logic for evaluating simulation requests based on predicted costs and current load.
3. **Monitoring Module:** Responsible for periodically fetching resource metrics from:
   * the Kubernetes API Server (for node CPU/Memory usage and pod metrics), using the `kube-rs` crate;
   * Prometheus/VictoriaMetrics (for network bandwidth and potentially other metrics), using a Prometheus client crate.
4. **Cost Database:** Stores and retrieves historical simulation parameters and their observed resource costs. (Potentially using SQLite via `rusqlite` for embedded persistence, or connecting to an external DB.)
5. **State Manager:** Tracks the set of currently approved/running simulations and their predicted resource footprints. (Likely in-memory, potentially backed by the database for recovery.)

```mermaid
graph TD
    subgraph "LARS Service (Rust Application)"
        A["API Server (axum/actix-web)"] --> B(Scheduler Core);
        B --> C{Monitoring Module};
        B --> D["Cost Database (SQLite/rusqlite)"];
        B --> E[State Manager];
        C --> F["Kubernetes API (kube-rs)"];
        C --> G["Prometheus API (prometheus-client)"];
        D -- Stores/Retrieves --> H{Persistent Storage};
        E -- Tracks --> B;
    end

    I("Simulation Orchestrator e.g. run.py") -- "HTTP POST /request_run" --> A;
    I -- "HTTP POST /simulation_complete" --> A;
    A -- "HTTP Response (approved/denied)" --> I;

    F -- Node/Pod Metrics --> K(Kubernetes Cluster);
    G -- Bandwidth/Other Metrics --> L("Prometheus/VictoriaMetrics");

    K -- Metrics --> C;
    L -- Metrics --> C;
```

## 2. Data Flow: Admission Request (`/request_run`)

1. `run.py` sends `POST /request_run` with simulation params `{sim_params}`.
2. API Server receives the request and passes `{sim_params}` to the Scheduler Core.
3. Scheduler Core queries the Cost Database for historical costs matching `{sim_params}`.
4. Scheduler Core requests current overall cluster usage from the Monitoring Module.
5. Scheduler Core queries the State Manager for predicted costs of currently active simulations.
6. Scheduler Core calculates `predicted_cost = predict(historical_cost or default)`.
7. Scheduler Core calculates `future_load = current_usage + sum(active_sim_costs) + predicted_cost`.
8. Scheduler Core compares `future_load` against the configured `target_utilization`.
9. If `future_load <= target_utilization`:
   * Generate a unique `simulation_id`.
   * Update the State Manager: add `simulation_id` with `predicted_cost`.
   * API Server responds `{"decision": "approved", "simulation_id": simulation_id}`.
10. Else:
    * API Server responds `{"decision": "denied"}`.

A minimal sketch of this decision logic is shown below.
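This is only an illustration of steps 6-10; the `ResourceVector` and `Decision` names are assumptions for the sketch and not the actual LARS types.

```rust
/// Illustrative resource vector: fractions of total cluster capacity (0.0..=1.0).
#[derive(Clone, Copy, Default)]
struct ResourceVector {
    cpu: f64,
    memory: f64,
    network: f64,
}

impl ResourceVector {
    fn add(self, other: ResourceVector) -> ResourceVector {
        ResourceVector {
            cpu: self.cpu + other.cpu,
            memory: self.memory + other.memory,
            network: self.network + other.network,
        }
    }

    /// True if every dimension stays at or below the corresponding target.
    fn within(self, target: ResourceVector) -> bool {
        self.cpu <= target.cpu && self.memory <= target.memory && self.network <= target.network
    }
}

enum Decision {
    Approved { simulation_id: uuid::Uuid },
    Denied,
}

/// Steps 6-10 of the admission flow: admit only if the predicted future load
/// stays within the configured target utilization.
fn evaluate_request(
    current_usage: ResourceVector,
    active_sim_costs: &[ResourceVector],
    predicted_cost: ResourceVector,
    target_utilization: ResourceVector,
) -> Decision {
    let active_total = active_sim_costs
        .iter()
        .fold(ResourceVector::default(), |acc, c| acc.add(*c));
    let future_load = current_usage.add(active_total).add(predicted_cost);

    if future_load.within(target_utilization) {
        Decision::Approved { simulation_id: uuid::Uuid::new_v4() }
    } else {
        Decision::Denied
    }
}
```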
## 3. Data Flow: Learning & Monitoring

1. Monitoring Module periodically fetches overall cluster metrics (Node CPU/Mem, Network Bandwidth) and stores the latest values.
2. Monitoring Module identifies pods/resources associated with active `simulation_id`s (via K8s labels set during Helm deployment).
3. Monitoring Module tracks resource usage specifically for these labeled resources.
4. When `POST /simulation_complete` is received for a `simulation_id`:
   * State Manager marks the simulation as completed.
   * Scheduler Core retrieves the monitored peak resource usage for that `simulation_id` from the Monitoring Module.
   * Scheduler Core calculates the differential cost (`peak_usage - baseline_before_start`).
   * Scheduler Core stores the original `{sim_params}` and the calculated `differential_cost` into the Cost Database.
   * State Manager removes the `simulation_id` from the active list.
## 4. Cost Model & Prediction (Initial)

* **Database Schema:** A simple table mapping simulation parameters (e.g., `chart`, `nodecount`, `publisher_enabled`, etc.) to observed costs (`peak_cpu_delta`, `peak_mem_delta`, `peak_network_delta`). Could use JSON blobs for parameters initially for flexibility.
* **Lookup:** Find exact matches for parameters first. If no exact match, find the "closest" match (e.g., similar node count for the same chart) or use a pre-defined default cost for that chart type.
* **Prediction:** Initially, the predicted cost will simply be the historical cost found in the database or the default. Future enhancements could involve averaging recent runs or simple linear scaling based on parameters like node count. A sketch of this lookup-with-fallback is shown below.
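The sketch below assumes an in-memory map of historical costs keyed by `(chart, node_count)`; the `CostKey`, `ObservedCost`, and `default_cost_for_chart` names are illustrative, not the actual LARS API.

```rust
use std::collections::HashMap;

/// Illustrative key: the subset of simulation parameters used for lookup.
#[derive(Clone, PartialEq, Eq, Hash)]
struct CostKey {
    chart: String,
    node_count: u32,
}

/// Observed differential cost for one past run (illustrative fields).
#[derive(Clone, Copy)]
struct ObservedCost {
    peak_cpu_delta: f64,
    peak_mem_delta: f64,
    peak_network_delta: f64,
}

/// Exact match first, then the closest node count for the same chart,
/// then a per-chart default.
fn predict_cost(
    history: &HashMap<CostKey, ObservedCost>,
    chart: &str,
    node_count: u32,
    default_cost_for_chart: impl Fn(&str) -> ObservedCost,
) -> ObservedCost {
    // 1. Exact match on (chart, node_count).
    let exact = CostKey { chart: chart.to_string(), node_count };
    if let Some(cost) = history.get(&exact) {
        return *cost;
    }

    // 2. Closest node count for the same chart.
    let closest = history
        .iter()
        .filter(|(k, _)| k.chart == chart)
        .min_by_key(|(k, _)| k.node_count.abs_diff(node_count));
    if let Some((_, cost)) = closest {
        return *cost;
    }

    // 3. Fall back to a pre-defined default for this chart type.
    default_cost_for_chart(chart)
}
```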
## 5. Persistence

* The Cost Database will be the primary persistent store. Using embedded SQLite simplifies deployment, storing the DB file in a PersistentVolume within Kubernetes.

## 6. Configuration

* Configuration (API ports, K8s/Prometheus endpoints, DB path, utilization targets) will be managed via environment variables or a configuration file, following standard Rust practices.

## 7. Error Handling

* API calls should handle missing parameters gracefully.
* Failures to connect to K8s or Prometheus should be logged, and potentially lead to the scheduler entering a "safe mode" where it denies all requests until connectivity is restored.
* Database errors should be logged.

## 8. Web User Interface

To provide visibility into LARS's state and activity, a web interface will be included.

* **Technology:**
  * Backend Framework: `Axum` (Rust)
  * Templating Engine: `MiniJinja` (Rust)
  * CSS Framework: `TailwindCSS`
* **Real-time Updates:** Server-Sent Events (SSE) will be used to push updates from the server to connected web clients without requiring polling. An endpoint like `GET /status-stream` will stream updates (see the sketch after this section).
* **Functionality:** The UI will display:
  * Overall lab status (if tracked by LARS).
  * A list of simulations currently **queued** (waiting for approval), showing their parameters and predicted resource costs.
  * A list of simulations currently **active** (running), showing their parameters, predicted costs, and updating **actual** resource usage (avg/peak/median) as monitored by LARS.
* **Backend Changes:**
  * The `State Manager` must be updated to track queued simulation requests in addition to active ones.
  * The `Monitoring Module` needs to periodically update the shared state with the latest *actual* resource usage for active simulations.
  * New Axum routes are required:
    * `GET /`: Serves the main HTML page rendered by MiniJinja.
    * `GET /static/*`: Serves static assets (Tailwind CSS output, potentially JS).
    * `GET /status-stream`: Handles SSE connections and pushes state updates.
* **Frontend:**
  * HTML templates (`.html.j2`) rendered by MiniJinja.
  * Styling using TailwindCSS utility classes.
  * Minimal vanilla JavaScript to connect to the SSE endpoint and update the DOM based on received event data (e.g., adding/removing simulations, updating usage figures).
* **Development Workflow:**
  * `tailwindcss` CLI run in watch mode (`--watch`) to automatically rebuild CSS on changes to templates.
  * `cargo-watch` to monitor Rust code changes and restart the Axum server.
  * `tower-livereload` (or similar) integrated into Axum (in debug builds) to trigger browser reloads upon server restart.
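As a hedged illustration of the `GET /status-stream` endpoint described in section 8, here is a minimal Axum SSE handler. The `AppState` and `StatusSnapshot` types, the one-second push interval, and the route wiring are assumptions for this example, not the actual LARS implementation.

```rust
use std::{convert::Infallible, sync::Arc, time::Duration};

use axum::{
    extract::State,
    response::sse::{Event, KeepAlive, Sse},
    routing::get,
    Router,
};
use futures::stream::Stream;
use serde::Serialize;
use tokio::sync::RwLock;

/// Illustrative snapshot of what the UI needs: queued and active simulations.
#[derive(Clone, Default, Serialize)]
struct StatusSnapshot {
    queued: Vec<String>,
    active: Vec<String>,
}

/// Shared state the Scheduler Core / Monitoring Module would keep updated.
type AppState = Arc<RwLock<StatusSnapshot>>;

/// GET /status-stream: push the current snapshot to the browser once per second.
async fn status_stream(
    State(state): State<AppState>,
) -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
    let stream = async_stream::stream! {
        let mut ticker = tokio::time::interval(Duration::from_secs(1));
        loop {
            ticker.tick().await;
            let snapshot = state.read().await.clone();
            // Serialize the snapshot as the SSE event payload.
            yield Ok(Event::default()
                .json_data(&snapshot)
                .unwrap_or_else(|_| Event::default()));
        }
    };
    Sse::new(stream).keep_alive(KeepAlive::default())
}

#[tokio::main]
async fn main() {
    let state: AppState = Arc::new(RwLock::new(StatusSnapshot::default()));
    let app = Router::new()
        .route("/status-stream", get(status_stream))
        .with_state(state);

    let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
    axum::serve(listener, app).await.unwrap();
}
```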
lars/README.md (new file) | 17 lines

@@ -0,0 +1,17 @@
# LARS - Lab Automated Resource Scheduler

LARS (Lab Automated Resource Scheduler) is a service designed to intelligently manage and schedule simulation workloads within the lab environment.

## Purpose

The primary goal of LARS is to maximize the utilization of lab computing resources (CPU, Memory, Network Bandwidth) by safely running multiple simulations concurrently, while preventing resource exhaustion that could destabilize the lab or interfere with simulation results.

It acts as a gatekeeper, learning the resource cost of different simulation configurations and using this knowledge to predict the impact of new requests. It only approves new simulations if the predicted total load remains within acceptable operational limits.

## Technology

LARS is written in **Rust** for its performance, reliability, and memory safety characteristics, which are crucial for a long-running infrastructure service.

## Interaction

LARS provides an API for simulation orchestrators (like the `dst-prefect-workflows/prefect/run.py` script) to request permission before deploying a simulation. `run.py` queries LARS with the simulation parameters, and LARS responds whether the simulation can proceed immediately or should be deferred.
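To make the request/response exchange concrete, here is a hedged sketch of what the `/request_run` payload and decision types could look like in Rust with `serde`. Field names beyond `chart` and node count (such as `duration_minutes` and `docker_image`) are illustrative assumptions, not a fixed contract.

```rust
use serde::{Deserialize, Serialize};
use uuid::Uuid;

/// Body of POST /request_run, sent by the orchestrator (e.g. run.py).
/// Fields other than `chart` and `node_count` are illustrative.
#[derive(Debug, Deserialize)]
struct RunRequest {
    chart: String,
    node_count: u32,
    duration_minutes: u32, // assumed field
    docker_image: String,  // assumed field
}

/// Response from LARS: approved (with an id to report back on completion) or denied.
/// Serializes to {"decision": "approved", "simulation_id": "..."} or {"decision": "denied"}.
#[derive(Debug, Serialize)]
#[serde(tag = "decision", rename_all = "lowercase")]
enum RunDecision {
    Approved { simulation_id: Uuid },
    Denied,
}

/// Body of POST /simulation_complete.
#[derive(Debug, Deserialize)]
struct SimulationComplete {
    simulation_id: Uuid,
}
```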
lars/SPEC.md (new file) | 66 lines

@@ -0,0 +1,66 @@
# LARS - Specification

This document outlines the functional and non-functional requirements for LARS (Lab Automated Resource Scheduler).

## 1. Functional Requirements

### 1.1. Admission Control API

- LARS MUST expose an HTTP API endpoint (e.g., `POST /request_run`).
- This endpoint MUST accept a payload describing the parameters of a requested simulation (e.g., chart type, node count, publisher settings, requested duration).
- The endpoint MUST evaluate the request based on current lab load and predicted cost.
- The endpoint MUST respond with a decision (`approved` or `denied`).
- If approved, the response MUST include a unique `simulation_id` for tracking.

### 1.2. Simulation Completion API

- LARS MUST expose an HTTP API endpoint (e.g., `POST /simulation_complete`).
- This endpoint MUST accept a payload containing the `simulation_id` of a completed simulation.
- Upon receiving this notification, LARS MUST mark the simulation as complete internally.

### 1.3. Resource Monitoring

- LARS MUST continuously monitor the overall resource usage of the Kubernetes cluster nodes (CPU, Memory).
- LARS MUST continuously monitor the overall network bandwidth usage (via Prometheus/VictoriaMetrics).
- LARS MUST be able to identify and monitor the specific resource usage attributed to individual simulations (likely via Kubernetes labels/selectors associated with the `simulation_id`).
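As a hedged illustration of the label-based attribution in 1.3, the following kube-rs sketch lists the pods belonging to one simulation by label selector. The label key `lars/simulation-id` and the `zerotesting` namespace are assumptions for this example; the real labels are whatever the Helm deployment sets.

```rust
use k8s_openapi::api::core::v1::Pod;
use kube::{api::ListParams, Api, Client};

/// List the pods attributed to a given simulation via a label selector.
/// Label key and namespace are illustrative assumptions.
async fn pods_for_simulation(simulation_id: &str) -> Result<Vec<String>, kube::Error> {
    let client = Client::try_default().await?;
    let pods: Api<Pod> = Api::namespaced(client, "zerotesting");

    let lp = ListParams::default().labels(&format!("lars/simulation-id={simulation_id}"));
    let pod_list = pods.list(&lp).await?;

    // Collect pod names; a real Monitoring Module would query metrics for these pods.
    Ok(pod_list
        .items
        .into_iter()
        .filter_map(|p| p.metadata.name)
        .collect())
}
```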
### 1.4. Cost Modeling & Prediction

- LARS MUST maintain a persistent database storing historical resource costs (e.g., peak CPU delta, peak Memory delta, peak Network delta) associated with completed simulations, keyed by simulation parameters.
- When evaluating a `/request_run`, LARS MUST query this database to find the cost of similar past simulations.
- LARS MUST use historical data (or heuristics/defaults if no history exists) to predict the resource impact of the requested simulation.
- LARS MUST factor in the predicted costs of already approved, currently running simulations when making a decision.

### 1.5. Scheduling Logic

- LARS MUST maintain a configurable target utilization threshold (e.g., 80% CPU, 80% Memory, 75% Bandwidth).
- LARS MUST approve a `/request_run` only if `current_usage + predicted_cost_of_request + predicted_cost_of_other_approved_sims <= target_utilization`.
- LARS MUST update its internal state when a simulation is approved to account for its predicted resource allocation.

### 1.6. Cost Learning

- After a simulation completes (signaled by `/simulation_complete`), LARS MUST analyze the monitored resource usage specifically attributed to that simulation's `simulation_id`.
- LARS MUST calculate the actual differential resource cost (peak usage during the simulation minus the baseline before the simulation).
- LARS MUST update its persistent database with this newly observed cost data, refining its future predictions.

## 2. Non-Functional Requirements

### 2.1. Reliability

- LARS MUST be designed for continuous, long-term operation.
- Failures in LARS should prevent new simulations from starting but MUST NOT interfere with simulations already running.
- LARS MUST handle transient failures (e.g., temporary inability to reach the Kubernetes API or Prometheus) gracefully, potentially by temporarily denying requests until connectivity is restored.

### 2.2. Performance

- API responses (`/request_run`, `/simulation_complete`) SHOULD be returned quickly (e.g., within milliseconds to a few seconds) to avoid blocking the simulation orchestrator.
- Resource monitoring SHOULD be efficient and not impose significant overhead on the cluster or monitoring systems.

### 2.3. Persistence

- Learned cost data MUST survive restarts of the LARS service.

### 2.4. Configurability

- Target utilization thresholds MUST be configurable.
- API endpoints for Kubernetes and Prometheus MUST be configurable.
- Database connection/path MUST be configurable.

### 2.5. Observability

- LARS SHOULD expose metrics about its own operation (e.g., requests processed, decisions made, current predicted load, database size).
- LARS SHOULD produce structured logs for debugging and monitoring.

### 2.6. Testability

- Core logic components (scheduler, cost prediction, state management) MUST be designed in a way that facilitates unit testing.
- Unit tests SHOULD be provided for critical logic paths, covering expected behavior, edge cases, and error handling where applicable.
- Mocks or test doubles SHOULD be used where necessary to isolate components from external dependencies (like the Kubernetes API, Prometheus, or the database) during unit tests.
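As a hedged sketch of the self-metrics in 2.5, using the `prometheus-client` crate already listed in `Cargo.toml`: the metric names and the idea of serving them from a `/metrics` route are assumptions, not part of the spec.

```rust
use prometheus_client::{
    encoding::text::encode,
    metrics::{counter::Counter, gauge::Gauge},
    registry::Registry,
};

/// Register a few illustrative self-metrics for LARS.
fn build_metrics() -> (Registry, Counter, Counter, Gauge) {
    let mut registry = Registry::default();

    let requests_processed: Counter = Counter::default();
    registry.register(
        "lars_requests_processed",
        "Admission requests processed",
        requests_processed.clone(),
    );

    let requests_denied: Counter = Counter::default();
    registry.register(
        "lars_requests_denied",
        "Admission requests denied",
        requests_denied.clone(),
    );

    let predicted_load: Gauge = Gauge::default();
    registry.register(
        "lars_predicted_load",
        "Sum of predicted costs of active simulations",
        predicted_load.clone(),
    );

    (registry, requests_processed, requests_denied, predicted_load)
}

/// Render the registry in the Prometheus text exposition format,
/// e.g. from a GET /metrics handler.
fn render_metrics(registry: &Registry) -> String {
    let mut body = String::new();
    encode(&mut body, registry).expect("encoding metrics should not fail");
    body
}
```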
lars/lars_db.sqlite (new file) | BIN (binary file not shown)
lars/migrations/20240402100000_init.sql (new file) | 16 lines

@@ -0,0 +1,16 @@
-- Create cost_history table
CREATE TABLE IF NOT EXISTS cost_history (
    chart TEXT NOT NULL,
    node_count INTEGER NOT NULL,
    -- Observed costs
    cpu_cores REAL NOT NULL,
    memory_gb REAL NOT NULL,
    -- Timestamp of observation
    observed_at DATETIME DEFAULT CURRENT_TIMESTAMP,

    -- Unique constraint on the simulation parameters
    PRIMARY KEY (chart, node_count)
);

-- Optional: Index for faster lookups if needed later
-- CREATE INDEX IF NOT EXISTS idx_cost_history_params ON cost_history (chart, node_count);
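For context, this is roughly how the `cost_history` table above could be read and updated with the `sqlx` SQLite setup from `Cargo.toml`. It is a sketch only; the `CostRow` struct and the exact query shape are illustrative assumptions.

```rust
use sqlx::sqlite::SqlitePool;

/// Mirrors the columns of the cost_history table created by the migration above.
#[derive(Debug, sqlx::FromRow)]
struct CostRow {
    chart: String,
    node_count: i64,
    cpu_cores: f64,
    memory_gb: f64,
}

/// Look up the observed cost for an exact (chart, node_count) match, if any.
async fn lookup_cost(
    pool: &SqlitePool,
    chart: &str,
    node_count: i64,
) -> Result<Option<CostRow>, sqlx::Error> {
    sqlx::query_as::<_, CostRow>(
        "SELECT chart, node_count, cpu_cores, memory_gb \
         FROM cost_history WHERE chart = ? AND node_count = ?",
    )
    .bind(chart)
    .bind(node_count)
    .fetch_optional(pool)
    .await
}

/// Record (or update) an observed cost after a simulation completes.
async fn record_cost(pool: &SqlitePool, row: &CostRow) -> Result<(), sqlx::Error> {
    sqlx::query(
        "INSERT INTO cost_history (chart, node_count, cpu_cores, memory_gb) \
         VALUES (?, ?, ?, ?) \
         ON CONFLICT(chart, node_count) DO UPDATE \
         SET cpu_cores = excluded.cpu_cores, memory_gb = excluded.memory_gb",
    )
    .bind(&row.chart)
    .bind(row.node_count)
    .bind(row.cpu_cores)
    .bind(row.memory_gb)
    .execute(pool)
    .await?;
    Ok(())
}
```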
1
lars/node_modules/.bin/detect-libc
generated
vendored
Symbolic link
1
lars/node_modules/.bin/detect-libc
generated
vendored
Symbolic link
@@ -0,0 +1 @@
|
||||
../detect-libc/bin/detect-libc.js
|
||||
1
lars/node_modules/.bin/jiti
generated
vendored
Symbolic link
1
lars/node_modules/.bin/jiti
generated
vendored
Symbolic link
@@ -0,0 +1 @@
|
||||
../jiti/lib/jiti-cli.mjs
|
||||
1
lars/node_modules/.bin/tailwindcss
generated
vendored
Symbolic link
1
lars/node_modules/.bin/tailwindcss
generated
vendored
Symbolic link
@@ -0,0 +1 @@
|
||||
../@tailwindcss/cli/dist/index.mjs
|
||||
381
lars/node_modules/.package-lock.json
generated
vendored
Normal file
381
lars/node_modules/.package-lock.json
generated
vendored
Normal file
@@ -0,0 +1,381 @@
|
||||
{
|
||||
"name": "lars",
|
||||
"version": "1.0.0",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"node_modules/@parcel/watcher": {
|
||||
"version": "2.5.1",
|
||||
"resolved": "https://registry.npmjs.org/@parcel/watcher/-/watcher-2.5.1.tgz",
|
||||
"integrity": "sha512-dfUnCxiN9H4ap84DvD2ubjw+3vUNpstxa0TneY/Paat8a3R4uQZDLSvWjmznAY/DoahqTHl9V46HF/Zs3F29pg==",
|
||||
"dev": true,
|
||||
"hasInstallScript": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"detect-libc": "^1.0.3",
|
||||
"is-glob": "^4.0.3",
|
||||
"micromatch": "^4.0.5",
|
||||
"node-addon-api": "^7.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 10.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"type": "opencollective",
|
||||
"url": "https://opencollective.com/parcel"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@parcel/watcher-android-arm64": "2.5.1",
|
||||
"@parcel/watcher-darwin-arm64": "2.5.1",
|
||||
"@parcel/watcher-darwin-x64": "2.5.1",
|
||||
"@parcel/watcher-freebsd-x64": "2.5.1",
|
||||
"@parcel/watcher-linux-arm-glibc": "2.5.1",
|
||||
"@parcel/watcher-linux-arm-musl": "2.5.1",
|
||||
"@parcel/watcher-linux-arm64-glibc": "2.5.1",
|
||||
"@parcel/watcher-linux-arm64-musl": "2.5.1",
|
||||
"@parcel/watcher-linux-x64-glibc": "2.5.1",
|
||||
"@parcel/watcher-linux-x64-musl": "2.5.1",
|
||||
"@parcel/watcher-win32-arm64": "2.5.1",
|
||||
"@parcel/watcher-win32-ia32": "2.5.1",
|
||||
"@parcel/watcher-win32-x64": "2.5.1"
|
||||
}
|
||||
},
|
||||
"node_modules/@parcel/watcher-linux-x64-glibc": {
|
||||
"version": "2.5.1",
|
||||
"resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-glibc/-/watcher-linux-x64-glibc-2.5.1.tgz",
|
||||
"integrity": "sha512-GcESn8NZySmfwlTsIur+49yDqSny2IhPeZfXunQi48DMugKeZ7uy1FX83pO0X22sHntJ4Ub+9k34XQCX+oHt2A==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"type": "opencollective",
|
||||
"url": "https://opencollective.com/parcel"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/cli": {
|
||||
"version": "4.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/cli/-/cli-4.1.0.tgz",
|
||||
"integrity": "sha512-r3coRYp4Rp00L85gwIb4AKqSX7hJ8IwvD7ENQLP8/AlBiP0tcgNy6gAQJD57ERuJljco2dstadP7kMyllzBaSw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@parcel/watcher": "^2.5.1",
|
||||
"@tailwindcss/node": "4.1.0",
|
||||
"@tailwindcss/oxide": "4.1.0",
|
||||
"enhanced-resolve": "^5.18.1",
|
||||
"mri": "^1.2.0",
|
||||
"picocolors": "^1.1.1",
|
||||
"tailwindcss": "4.1.0"
|
||||
},
|
||||
"bin": {
|
||||
"tailwindcss": "dist/index.mjs"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/node": {
|
||||
"version": "4.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.0.tgz",
|
||||
"integrity": "sha512-mfgxGxFaxbsUbaGwKIAQXUSm7Qoojw53FftpoKwo4ANwr9wnDaByz4vi1gMti/xfJvmQ5lzA1DvPiX5yCHtBkQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"enhanced-resolve": "^5.18.1",
|
||||
"jiti": "^2.4.2",
|
||||
"lightningcss": "1.29.2",
|
||||
"tailwindcss": "4.1.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide": {
|
||||
"version": "4.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.0.tgz",
|
||||
"integrity": "sha512-A33oyZKpPFH08d7xkl13Dc8OTsbPhsuls0z9gUCxIHvn8c1BsUACddQxL6HwaeJR1fSYyXZUw8bdWcD8bVawpQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@tailwindcss/oxide-android-arm64": "4.1.0",
|
||||
"@tailwindcss/oxide-darwin-arm64": "4.1.0",
|
||||
"@tailwindcss/oxide-darwin-x64": "4.1.0",
|
||||
"@tailwindcss/oxide-freebsd-x64": "4.1.0",
|
||||
"@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.0",
|
||||
"@tailwindcss/oxide-linux-arm64-gnu": "4.1.0",
|
||||
"@tailwindcss/oxide-linux-arm64-musl": "4.1.0",
|
||||
"@tailwindcss/oxide-linux-x64-gnu": "4.1.0",
|
||||
"@tailwindcss/oxide-linux-x64-musl": "4.1.0",
|
||||
"@tailwindcss/oxide-win32-arm64-msvc": "4.1.0",
|
||||
"@tailwindcss/oxide-win32-x64-msvc": "4.1.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-linux-x64-gnu": {
|
||||
"version": "4.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.0.tgz",
|
||||
"integrity": "sha512-cp0Rf9Wit2kZHhrV8HIoDFD8dxU2+ZTCFCFbDj3a07pGyyPwLCJm5H5VipKXgYrBaLmlYu73ERidW0S5sdEXEg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
},
|
||||
"node_modules/braces": {
|
||||
"version": "3.0.3",
|
||||
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
|
||||
"integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"fill-range": "^7.1.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/detect-libc": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz",
|
||||
"integrity": "sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==",
|
||||
"dev": true,
|
||||
"license": "Apache-2.0",
|
||||
"bin": {
|
||||
"detect-libc": "bin/detect-libc.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=0.10"
|
||||
}
|
||||
},
|
||||
"node_modules/enhanced-resolve": {
|
||||
"version": "5.18.1",
|
||||
"resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.1.tgz",
|
||||
"integrity": "sha512-ZSW3ma5GkcQBIpwZTSRAI8N71Uuwgs93IezB7mf7R60tC8ZbJideoDNKjHn2O9KIlx6rkGTTEk1xUCK2E1Y2Yg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"graceful-fs": "^4.2.4",
|
||||
"tapable": "^2.2.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=10.13.0"
|
||||
}
|
||||
},
|
||||
"node_modules/fill-range": {
|
||||
"version": "7.1.1",
|
||||
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
|
||||
"integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"to-regex-range": "^5.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/graceful-fs": {
|
||||
"version": "4.2.11",
|
||||
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
|
||||
"integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==",
|
||||
"dev": true,
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/is-extglob": {
|
||||
"version": "2.1.1",
|
||||
"resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
|
||||
"integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
}
|
||||
},
|
||||
"node_modules/is-glob": {
|
||||
"version": "4.0.3",
|
||||
"resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
|
||||
"integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"is-extglob": "^2.1.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
}
|
||||
},
|
||||
"node_modules/is-number": {
|
||||
"version": "7.0.0",
|
||||
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
|
||||
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=0.12.0"
|
||||
}
|
||||
},
|
||||
"node_modules/jiti": {
|
||||
"version": "2.4.2",
|
||||
"resolved": "https://registry.npmjs.org/jiti/-/jiti-2.4.2.tgz",
|
||||
"integrity": "sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"bin": {
|
||||
"jiti": "lib/jiti-cli.mjs"
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss": {
|
||||
"version": "1.29.2",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.29.2.tgz",
|
||||
"integrity": "sha512-6b6gd/RUXKaw5keVdSEtqFVdzWnU5jMxTUjA2bVcMNPLwSQ08Sv/UodBVtETLCn7k4S1Ibxwh7k68IwLZPgKaA==",
|
||||
"dev": true,
|
||||
"license": "MPL-2.0",
|
||||
"dependencies": {
|
||||
"detect-libc": "^2.0.3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 12.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"type": "opencollective",
|
||||
"url": "https://opencollective.com/parcel"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"lightningcss-darwin-arm64": "1.29.2",
|
||||
"lightningcss-darwin-x64": "1.29.2",
|
||||
"lightningcss-freebsd-x64": "1.29.2",
|
||||
"lightningcss-linux-arm-gnueabihf": "1.29.2",
|
||||
"lightningcss-linux-arm64-gnu": "1.29.2",
|
||||
"lightningcss-linux-arm64-musl": "1.29.2",
|
||||
"lightningcss-linux-x64-gnu": "1.29.2",
|
||||
"lightningcss-linux-x64-musl": "1.29.2",
|
||||
"lightningcss-win32-arm64-msvc": "1.29.2",
|
||||
"lightningcss-win32-x64-msvc": "1.29.2"
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss-linux-x64-gnu": {
|
||||
"version": "1.29.2",
|
||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.29.2.tgz",
|
||||
"integrity": "sha512-0v6idDCPG6epLXtBH/RPkHvYx74CVziHo6TMYga8O2EiQApnUPZsbR9nFNrg2cgBzk1AYqEd95TlrsL7nYABQg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"dev": true,
|
||||
"license": "MPL-2.0",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 12.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"type": "opencollective",
|
||||
"url": "https://opencollective.com/parcel"
|
||||
}
|
||||
},
|
||||
"node_modules/lightningcss/node_modules/detect-libc": {
|
||||
"version": "2.0.3",
|
||||
"resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz",
|
||||
"integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==",
|
||||
"dev": true,
|
||||
"license": "Apache-2.0",
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/micromatch": {
|
||||
"version": "4.0.8",
|
||||
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
|
||||
"integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"braces": "^3.0.3",
|
||||
"picomatch": "^2.3.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8.6"
|
||||
}
|
||||
},
|
||||
"node_modules/mri": {
|
||||
"version": "1.2.0",
|
||||
"resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz",
|
||||
"integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
}
|
||||
},
|
||||
"node_modules/node-addon-api": {
|
||||
"version": "7.1.1",
|
||||
"resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.1.tgz",
|
||||
"integrity": "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/picocolors": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
|
||||
"integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
|
||||
"dev": true,
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/picomatch": {
|
||||
"version": "2.3.1",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
|
||||
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=8.6"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/jonschlinkert"
|
||||
}
|
||||
},
|
||||
"node_modules/tailwindcss": {
|
||||
"version": "4.1.0",
|
||||
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.0.tgz",
|
||||
"integrity": "sha512-vBYstoFnvUZCDxaauNGQQEvJNQgCd1vSMDRYuZZMH1xRRcTboOk1rJrW5yFkEabU9X6Yx1C4LQ+QvPOvQj4Daw==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/tapable": {
|
||||
"version": "2.2.1",
|
||||
"resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz",
|
||||
"integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
}
|
||||
},
|
||||
"node_modules/to-regex-range": {
|
||||
"version": "5.0.1",
|
||||
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
|
||||
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"is-number": "^7.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
21
lars/node_modules/@parcel/watcher-linux-x64-glibc/LICENSE
generated
vendored
Normal file
21
lars/node_modules/@parcel/watcher-linux-x64-glibc/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2017-present Devon Govett
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
1
lars/node_modules/@parcel/watcher-linux-x64-glibc/README.md
generated
vendored
Normal file
1
lars/node_modules/@parcel/watcher-linux-x64-glibc/README.md
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
This is the linux-x64-glibc build of @parcel/watcher. See https://github.com/parcel-bundler/watcher for details.
|
||||
33
lars/node_modules/@parcel/watcher-linux-x64-glibc/package.json
generated
vendored
Normal file
33
lars/node_modules/@parcel/watcher-linux-x64-glibc/package.json
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
{
|
||||
"name": "@parcel/watcher-linux-x64-glibc",
|
||||
"version": "2.5.1",
|
||||
"main": "watcher.node",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/parcel-bundler/watcher.git"
|
||||
},
|
||||
"description": "A native C++ Node module for querying and subscribing to filesystem events. Used by Parcel 2.",
|
||||
"license": "MIT",
|
||||
"publishConfig": {
|
||||
"access": "public"
|
||||
},
|
||||
"funding": {
|
||||
"type": "opencollective",
|
||||
"url": "https://opencollective.com/parcel"
|
||||
},
|
||||
"files": [
|
||||
"watcher.node"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 10.0.0"
|
||||
},
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"libc": [
|
||||
"glibc"
|
||||
]
|
||||
}
|
||||
BIN
lars/node_modules/@parcel/watcher-linux-x64-glibc/watcher.node
generated
vendored
Normal file
BIN
lars/node_modules/@parcel/watcher-linux-x64-glibc/watcher.node
generated
vendored
Normal file
Binary file not shown.
21
lars/node_modules/@parcel/watcher/LICENSE
generated
vendored
Normal file
21
lars/node_modules/@parcel/watcher/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2017-present Devon Govett
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
135
lars/node_modules/@parcel/watcher/README.md
generated
vendored
Normal file
135
lars/node_modules/@parcel/watcher/README.md
generated
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
# @parcel/watcher
|
||||
|
||||
A native C++ Node module for querying and subscribing to filesystem events. Used by [Parcel 2](https://github.com/parcel-bundler/parcel).
|
||||
|
||||
## Features
|
||||
|
||||
- **Watch** - subscribe to realtime recursive directory change notifications when files or directories are created, updated, or deleted.
|
||||
- **Query** - performantly query for historical change events in a directory, even when your program is not running.
|
||||
- **Native** - implemented in C++ for performance and low-level integration with the operating system.
|
||||
- **Cross platform** - includes backends for macOS, Linux, Windows, FreeBSD, and Watchman.
|
||||
- **Performant** - events are throttled in C++ so the JavaScript thread is not overwhelmed during large filesystem changes (e.g. `git checkout` or `npm install`).
|
||||
- **Scalable** - tens of thousands of files can be watched or queried at once with good performance.
|
||||
|
||||
## Example
|
||||
|
||||
```javascript
|
||||
const watcher = require('@parcel/watcher');
|
||||
const path = require('path');
|
||||
|
||||
// Subscribe to events
|
||||
let subscription = await watcher.subscribe(process.cwd(), (err, events) => {
|
||||
console.log(events);
|
||||
});
|
||||
|
||||
// later on...
|
||||
await subscription.unsubscribe();
|
||||
|
||||
// Get events since some saved snapshot in the past
|
||||
let snapshotPath = path.join(process.cwd(), 'snapshot.txt');
|
||||
let events = await watcher.getEventsSince(process.cwd(), snapshotPath);
|
||||
|
||||
// Save a snapshot for later
|
||||
await watcher.writeSnapshot(process.cwd(), snapshotPath);
|
||||
```
|
||||
|
||||
## Watching
|
||||
|
||||
`@parcel/watcher` supports subscribing to realtime notifications of changes in a directory. It works recursively, so changes in sub-directories will also be emitted.
|
||||
|
||||
Events are throttled and coalesced for performance during large changes like `git checkout` or `npm install`, and a single notification will be emitted with all of the events at the end.
|
||||
|
||||
Only one notification will be emitted per file. For example, if a file was both created and updated since the last event, you'll get only a `create` event. If a file is both created and deleted, you will not be notified of that file. Renames cause two events: a `delete` for the old name, and a `create` for the new name.
|
||||
|
||||
```javascript
|
||||
let subscription = await watcher.subscribe(process.cwd(), (err, events) => {
|
||||
console.log(events);
|
||||
});
|
||||
```
|
||||
|
||||
Events have two properties:
|
||||
|
||||
- `type` - the event type: `create`, `update`, or `delete`.
|
||||
- `path` - the absolute path to the file or directory.
|
||||
|
||||
To unsubscribe from change notifications, call the `unsubscribe` method on the returned subscription object.
|
||||
|
||||
```javascript
|
||||
await subscription.unsubscribe();
|
||||
```
|
||||
|
||||
`@parcel/watcher` has the following watcher backends, listed in priority order:
|
||||
|
||||
- [FSEvents](https://developer.apple.com/documentation/coreservices/file_system_events) on macOS
|
||||
- [Watchman](https://facebook.github.io/watchman/) if installed
|
||||
- [inotify](http://man7.org/linux/man-pages/man7/inotify.7.html) on Linux
|
||||
- [ReadDirectoryChangesW](https://msdn.microsoft.com/en-us/library/windows/desktop/aa365465%28v%3Dvs.85%29.aspx) on Windows
|
||||
- [kqueue](https://man.freebsd.org/cgi/man.cgi?kqueue) on FreeBSD, or as an alternative to FSEvents on macOS
|
||||
|
||||
You can specify the exact backend you wish to use by passing the `backend` option. If that backend is not available on the current platform, the default backend will be used instead. See below for the list of backend names that can be passed to the options.
|
||||
|
||||
## Querying
|
||||
|
||||
`@parcel/watcher` also supports querying for historical changes made in a directory, even when your program is not running. This makes it easy to invalidate a cache and re-build only the files that have changed, for example. It can be **significantly** faster than traversing the entire filesystem to determine what files changed, depending on the platform.
|
||||
|
||||
In order to query for historical changes, you first need a previous snapshot to compare to. This can be saved to a file with the `writeSnapshot` function, e.g. just before your program exits.
|
||||
|
||||
```javascript
|
||||
await watcher.writeSnapshot(dirPath, snapshotPath);
|
||||
```
|
||||
|
||||
When your program starts up, you can query for changes that have occurred since that snapshot using the `getEventsSince` function.
|
||||
|
||||
```javascript
|
||||
let events = await watcher.getEventsSince(dirPath, snapshotPath);
|
||||
```
|
||||
|
||||
The events returned are exactly the same as the events that would be passed to the `subscribe` callback (see above).
|
||||
|
||||
`@parcel/watcher` has the following watcher backends, listed in priority order:
|
||||
|
||||
- [FSEvents](https://developer.apple.com/documentation/coreservices/file_system_events) on macOS
|
||||
- [Watchman](https://facebook.github.io/watchman/) if installed
|
||||
- [fts](http://man7.org/linux/man-pages/man3/fts.3.html) (brute force) on Linux and FreeBSD
|
||||
- [FindFirstFile](https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-findfirstfilea) (brute force) on Windows
|
||||
|
||||
The FSEvents (macOS) and Watchman backends are significantly more performant than the brute force backends used by default on Linux and Windows, for example returning results in milliseconds instead of seconds for large directory trees. This is because a background daemon monitoring filesystem changes on those platforms allows us to query cached data rather than traversing the filesystem manually (brute force).
|
||||
|
||||
macOS has good performance with FSEvents by default. For the best performance on other platforms, install [Watchman](https://facebook.github.io/watchman/) and it will be used by `@parcel/watcher` automatically.
|
||||
|
||||
You can specify the exact backend you wish to use by passing the `backend` option. If that backend is not available on the current platform, the default backend will be used instead. See below for the list of backend names that can be passed to the options.
|
||||
|
||||
## Options
|
||||
|
||||
All of the APIs in `@parcel/watcher` support the following options, which are passed as an object as the last function argument.
|
||||
|
||||
- `ignore` - an array of paths or glob patterns to ignore. uses [`is-glob`](https://github.com/micromatch/is-glob) to distinguish paths from globs. glob patterns are parsed with [`micromatch`](https://github.com/micromatch/micromatch) (see [features](https://github.com/micromatch/micromatch#matching-features)).
|
||||
- paths can be relative or absolute and can either be files or directories. No events will be emitted about these files or directories or their children.
|
||||
- glob patterns match on relative paths from the root that is watched. No events will be emitted for matching paths.
|
||||
- `backend` - the name of an explicitly chosen backend to use. Allowed options are `"fs-events"`, `"watchman"`, `"inotify"`, `"kqueue"`, `"windows"`, or `"brute-force"` (only for querying). If the specified backend is not available on the current platform, the default backend will be used instead.
|
||||
|
||||
## WASM
|
||||
|
||||
The `@parcel/watcher-wasm` package can be used in place of `@parcel/watcher` on unsupported platforms. It relies on the Node `fs` module, so in non-Node environments such as browsers, an `fs` polyfill will be needed.
|
||||
|
||||
**Note**: the WASM implementation is significantly less efficient than the native implementations because it must crawl the file system to watch each directory individually. Use the native `@parcel/watcher` package wherever possible.
|
||||
|
||||
```js
|
||||
import {subscribe} from '@parcel/watcher-wasm';
|
||||
|
||||
// Use the module as documented above.
|
||||
subscribe(/* ... */);
|
||||
```
|
||||
|
||||
## Who is using this?
|
||||
|
||||
- [Parcel 2](https://parceljs.org/)
|
||||
- [VSCode](https://code.visualstudio.com/updates/v1_62#_file-watching-changes)
|
||||
- [Tailwind CSS Intellisense](https://github.com/tailwindlabs/tailwindcss-intellisense)
|
||||
- [Gatsby Cloud](https://twitter.com/chatsidhartha/status/1435647412828196867)
|
||||
- [Nx](https://nx.dev)
|
||||
- [Nuxt](https://nuxt.com)
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
93
lars/node_modules/@parcel/watcher/binding.gyp
generated
vendored
Normal file
93
lars/node_modules/@parcel/watcher/binding.gyp
generated
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
{
|
||||
"targets": [
|
||||
{
|
||||
"target_name": "watcher",
|
||||
"defines": [ "NAPI_DISABLE_CPP_EXCEPTIONS" ],
|
||||
"sources": [ "src/binding.cc", "src/Watcher.cc", "src/Backend.cc", "src/DirTree.cc", "src/Glob.cc", "src/Debounce.cc" ],
|
||||
"include_dirs" : ["<!(node -p \"require('node-addon-api').include_dir\")"],
|
||||
'cflags!': [ '-fno-exceptions', '-std=c++17' ],
|
||||
'cflags_cc!': [ '-fno-exceptions', '-std=c++17' ],
|
||||
"conditions": [
|
||||
['OS=="mac"', {
|
||||
"sources": [
|
||||
"src/watchman/BSER.cc",
|
||||
"src/watchman/WatchmanBackend.cc",
|
||||
"src/shared/BruteForceBackend.cc",
|
||||
"src/unix/fts.cc",
|
||||
"src/macos/FSEventsBackend.cc",
|
||||
"src/kqueue/KqueueBackend.cc"
|
||||
],
|
||||
"link_settings": {
|
||||
"libraries": ["CoreServices.framework"]
|
||||
},
|
||||
"defines": [
|
||||
"WATCHMAN",
|
||||
"BRUTE_FORCE",
|
||||
"FS_EVENTS",
|
||||
"KQUEUE"
|
||||
],
|
||||
"xcode_settings": {
|
||||
"GCC_ENABLE_CPP_EXCEPTIONS": "YES"
|
||||
}
|
||||
}],
|
||||
['OS=="mac" and target_arch=="arm64"', {
|
||||
"xcode_settings": {
|
||||
"ARCHS": ["arm64"]
|
||||
}
|
||||
}],
|
||||
['OS=="linux" or OS=="android"', {
|
||||
"sources": [
|
||||
"src/watchman/BSER.cc",
|
||||
"src/watchman/WatchmanBackend.cc",
|
||||
"src/shared/BruteForceBackend.cc",
|
||||
"src/linux/InotifyBackend.cc",
|
||||
"src/unix/legacy.cc"
|
||||
],
|
||||
"defines": [
|
||||
"WATCHMAN",
|
||||
"INOTIFY",
|
||||
"BRUTE_FORCE"
|
||||
]
|
||||
}],
|
||||
['OS=="win"', {
|
||||
"sources": [
|
||||
"src/watchman/BSER.cc",
|
||||
"src/watchman/WatchmanBackend.cc",
|
||||
"src/shared/BruteForceBackend.cc",
|
||||
"src/windows/WindowsBackend.cc",
|
||||
"src/windows/win_utils.cc"
|
||||
],
|
||||
"defines": [
|
||||
"WATCHMAN",
|
||||
"WINDOWS",
|
||||
"BRUTE_FORCE"
|
||||
],
|
||||
"msvs_settings": {
|
||||
"VCCLCompilerTool": {
|
||||
"ExceptionHandling": 1, # /EHsc
|
||||
"AdditionalOptions": ['-std:c++17']
|
||||
}
|
||||
}
|
||||
}],
|
||||
['OS=="freebsd"', {
|
||||
"sources": [
|
||||
"src/watchman/BSER.cc",
|
||||
"src/watchman/WatchmanBackend.cc",
|
||||
"src/shared/BruteForceBackend.cc",
|
||||
"src/unix/fts.cc",
|
||||
"src/kqueue/KqueueBackend.cc"
|
||||
],
|
||||
"defines": [
|
||||
"WATCHMAN",
|
||||
"BRUTE_FORCE",
|
||||
"KQUEUE"
|
||||
]
|
||||
}]
|
||||
]
|
||||
}
|
||||
],
|
||||
"variables": {
|
||||
"openssl_fips": "",
|
||||
"node_use_dtrace": "false"
|
||||
}
|
||||
}
|
||||
49
lars/node_modules/@parcel/watcher/index.d.ts
generated
vendored
Normal file
49
lars/node_modules/@parcel/watcher/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
declare type FilePath = string;
|
||||
declare type GlobPattern = string;
|
||||
|
||||
declare namespace ParcelWatcher {
|
||||
export type BackendType =
|
||||
| 'fs-events'
|
||||
| 'watchman'
|
||||
| 'inotify'
|
||||
| 'windows'
|
||||
| 'brute-force';
|
||||
export type EventType = 'create' | 'update' | 'delete';
|
||||
export interface Options {
|
||||
ignore?: (FilePath|GlobPattern)[];
|
||||
backend?: BackendType;
|
||||
}
|
||||
export type SubscribeCallback = (
|
||||
err: Error | null,
|
||||
events: Event[]
|
||||
) => unknown;
|
||||
export interface AsyncSubscription {
|
||||
unsubscribe(): Promise<void>;
|
||||
}
|
||||
export interface Event {
|
||||
path: FilePath;
|
||||
type: EventType;
|
||||
}
|
||||
export function getEventsSince(
|
||||
dir: FilePath,
|
||||
snapshot: FilePath,
|
||||
opts?: Options
|
||||
): Promise<Event[]>;
|
||||
export function subscribe(
|
||||
dir: FilePath,
|
||||
fn: SubscribeCallback,
|
||||
opts?: Options
|
||||
): Promise<AsyncSubscription>;
|
||||
export function unsubscribe(
|
||||
dir: FilePath,
|
||||
fn: SubscribeCallback,
|
||||
opts?: Options
|
||||
): Promise<void>;
|
||||
export function writeSnapshot(
|
||||
dir: FilePath,
|
||||
snapshot: FilePath,
|
||||
opts?: Options
|
||||
): Promise<FilePath>;
|
||||
}
|
||||
|
||||
export = ParcelWatcher;
|
||||
41
lars/node_modules/@parcel/watcher/index.js
generated
vendored
Normal file
41
lars/node_modules/@parcel/watcher/index.js
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
const {createWrapper} = require('./wrapper');
|
||||
|
||||
let name = `@parcel/watcher-${process.platform}-${process.arch}`;
|
||||
if (process.platform === 'linux') {
|
||||
const { MUSL, family } = require('detect-libc');
|
||||
if (family === MUSL) {
|
||||
name += '-musl';
|
||||
} else {
|
||||
name += '-glibc';
|
||||
}
|
||||
}
|
||||
|
||||
let binding;
|
||||
try {
|
||||
binding = require(name);
|
||||
} catch (err) {
|
||||
handleError(err);
|
||||
try {
|
||||
binding = require('./build/Release/watcher.node');
|
||||
} catch (err) {
|
||||
handleError(err);
|
||||
try {
|
||||
binding = require('./build/Debug/watcher.node');
|
||||
} catch (err) {
|
||||
handleError(err);
|
||||
throw new Error(`No prebuild or local build of @parcel/watcher found. Tried ${name}. Please ensure it is installed (don't use --no-optional when installing with npm). Otherwise it is possible we don't support your platform yet. If this is the case, please report an issue to https://github.com/parcel-bundler/watcher.`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function handleError(err) {
|
||||
if (err?.code !== 'MODULE_NOT_FOUND') {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
const wrapper = createWrapper(binding);
|
||||
exports.writeSnapshot = wrapper.writeSnapshot;
|
||||
exports.getEventsSince = wrapper.getEventsSince;
|
||||
exports.subscribe = wrapper.subscribe;
|
||||
exports.unsubscribe = wrapper.unsubscribe;
|
||||
48
lars/node_modules/@parcel/watcher/index.js.flow
generated
vendored
Normal file
48
lars/node_modules/@parcel/watcher/index.js.flow
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
// @flow
|
||||
declare type FilePath = string;
|
||||
declare type GlobPattern = string;
|
||||
|
||||
export type BackendType =
|
||||
| 'fs-events'
|
||||
| 'watchman'
|
||||
| 'inotify'
|
||||
| 'windows'
|
||||
| 'brute-force';
|
||||
export type EventType = 'create' | 'update' | 'delete';
|
||||
export interface Options {
|
||||
ignore?: Array<FilePath | GlobPattern>,
|
||||
backend?: BackendType
|
||||
}
|
||||
export type SubscribeCallback = (
|
||||
err: ?Error,
|
||||
events: Array<Event>
|
||||
) => mixed;
|
||||
export interface AsyncSubscription {
|
||||
unsubscribe(): Promise<void>
|
||||
}
|
||||
export interface Event {
|
||||
path: FilePath,
|
||||
type: EventType
|
||||
}
|
||||
declare module.exports: {
|
||||
getEventsSince(
|
||||
dir: FilePath,
|
||||
snapshot: FilePath,
|
||||
opts?: Options
|
||||
): Promise<Array<Event>>,
|
||||
subscribe(
|
||||
dir: FilePath,
|
||||
fn: SubscribeCallback,
|
||||
opts?: Options
|
||||
): Promise<AsyncSubscription>,
|
||||
unsubscribe(
|
||||
dir: FilePath,
|
||||
fn: SubscribeCallback,
|
||||
opts?: Options
|
||||
): Promise<void>,
|
||||
writeSnapshot(
|
||||
dir: FilePath,
|
||||
snapshot: FilePath,
|
||||
opts?: Options
|
||||
): Promise<FilePath>
|
||||
}
|
||||
88  lars/node_modules/@parcel/watcher/package.json  generated  vendored  Normal file
@@ -0,0 +1,88 @@
{
  "name": "@parcel/watcher",
  "version": "2.5.1",
  "main": "index.js",
  "types": "index.d.ts",
  "repository": {
    "type": "git",
    "url": "https://github.com/parcel-bundler/watcher.git"
  },
  "description": "A native C++ Node module for querying and subscribing to filesystem events. Used by Parcel 2.",
  "license": "MIT",
  "publishConfig": {
    "access": "public"
  },
  "funding": {
    "type": "opencollective",
    "url": "https://opencollective.com/parcel"
  },
  "files": [
    "index.js",
    "index.js.flow",
    "index.d.ts",
    "wrapper.js",
    "package.json",
    "README.md",
    "LICENSE",
    "src",
    "scripts/build-from-source.js",
    "binding.gyp"
  ],
  "scripts": {
    "prebuild": "prebuildify --napi --strip --tag-libc",
    "format": "prettier --write \"./**/*.{js,json,md}\"",
    "build": "node-gyp rebuild",
    "install": "node scripts/build-from-source.js",
    "test": "mocha"
  },
  "engines": {
    "node": ">= 10.0.0"
  },
  "husky": {
    "hooks": {
      "pre-commit": "lint-staged"
    }
  },
  "lint-staged": {
    "*.{js,json,md}": [
      "prettier --write",
      "git add"
    ]
  },
  "dependencies": {
    "detect-libc": "^1.0.3",
    "is-glob": "^4.0.3",
    "micromatch": "^4.0.5",
    "node-addon-api": "^7.0.0"
  },
  "devDependencies": {
    "esbuild": "^0.19.8",
    "fs-extra": "^10.0.0",
    "husky": "^7.0.2",
    "lint-staged": "^11.1.2",
    "mocha": "^9.1.1",
    "napi-wasm": "^1.1.0",
    "prebuildify": "^6.0.1",
    "prettier": "^2.3.2"
  },
  "binary": {
    "napi_versions": [
      3
    ]
  },
  "optionalDependencies": {
    "@parcel/watcher-darwin-x64": "2.5.1",
    "@parcel/watcher-darwin-arm64": "2.5.1",
    "@parcel/watcher-win32-x64": "2.5.1",
    "@parcel/watcher-win32-arm64": "2.5.1",
    "@parcel/watcher-win32-ia32": "2.5.1",
    "@parcel/watcher-linux-x64-glibc": "2.5.1",
    "@parcel/watcher-linux-x64-musl": "2.5.1",
    "@parcel/watcher-linux-arm64-glibc": "2.5.1",
    "@parcel/watcher-linux-arm64-musl": "2.5.1",
    "@parcel/watcher-linux-arm-glibc": "2.5.1",
    "@parcel/watcher-linux-arm-musl": "2.5.1",
    "@parcel/watcher-android-arm64": "2.5.1",
    "@parcel/watcher-freebsd-x64": "2.5.1"
  }
}
13  lars/node_modules/@parcel/watcher/scripts/build-from-source.js  generated  vendored  Normal file
@@ -0,0 +1,13 @@
#!/usr/bin/env node

const {spawn} = require('child_process');

if (process.env.npm_config_build_from_source === 'true') {
  build();
}

function build() {
  spawn('node-gyp', ['rebuild'], { stdio: 'inherit', shell: true }).on('exit', function (code) {
    process.exit(code);
  });
}
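The install hook above only compiles when `npm_config_build_from_source` is `'true'`; npm exposes its config flags (such as `--build-from-source`) to scripts through `npm_config_*` environment variables. A hedged sketch of forcing that same path programmatically (the package name and the use of `spawnSync` are illustrative, and node-gyp plus a compiler toolchain are assumed to be available, as the script itself requires):

```js
// Sketch: force @parcel/watcher to compile from source on install,
// exercising scripts/build-from-source.js above.
const { spawnSync } = require('child_process');

const result = spawnSync('npm', ['install', '@parcel/watcher'], {
  stdio: 'inherit',
  shell: true,
  env: { ...process.env, npm_config_build_from_source: 'true' },
});

process.exit(result.status ?? 1);
```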
182  lars/node_modules/@parcel/watcher/src/Backend.cc  generated  vendored  Normal file
@@ -0,0 +1,182 @@
#ifdef FS_EVENTS
#include "macos/FSEventsBackend.hh"
#endif
#ifdef WATCHMAN
#include "watchman/WatchmanBackend.hh"
#endif
#ifdef WINDOWS
#include "windows/WindowsBackend.hh"
#endif
#ifdef INOTIFY
#include "linux/InotifyBackend.hh"
#endif
#ifdef KQUEUE
#include "kqueue/KqueueBackend.hh"
#endif
#ifdef __wasm32__
#include "wasm/WasmBackend.hh"
#endif
#include "shared/BruteForceBackend.hh"

#include "Backend.hh"
#include <unordered_map>

static std::unordered_map<std::string, std::shared_ptr<Backend>> sharedBackends;

std::shared_ptr<Backend> getBackend(std::string backend) {
  // Use FSEvents on macOS by default.
  // Use watchman by default if available on other platforms.
  // Fall back to brute force.
#ifdef FS_EVENTS
  if (backend == "fs-events" || backend == "default") {
    return std::make_shared<FSEventsBackend>();
  }
#endif
#ifdef WATCHMAN
  if ((backend == "watchman" || backend == "default") && WatchmanBackend::checkAvailable()) {
    return std::make_shared<WatchmanBackend>();
  }
#endif
#ifdef WINDOWS
  if (backend == "windows" || backend == "default") {
    return std::make_shared<WindowsBackend>();
  }
#endif
#ifdef INOTIFY
  if (backend == "inotify" || backend == "default") {
    return std::make_shared<InotifyBackend>();
  }
#endif
#ifdef KQUEUE
  if (backend == "kqueue" || backend == "default") {
    return std::make_shared<KqueueBackend>();
  }
#endif
#ifdef __wasm32__
  if (backend == "wasm" || backend == "default") {
    return std::make_shared<WasmBackend>();
  }
#endif
  if (backend == "brute-force" || backend == "default") {
    return std::make_shared<BruteForceBackend>();
  }

  return nullptr;
}

std::shared_ptr<Backend> Backend::getShared(std::string backend) {
  auto found = sharedBackends.find(backend);
  if (found != sharedBackends.end()) {
    return found->second;
  }

  auto result = getBackend(backend);
  if (!result) {
    return getShared("default");
  }

  result->run();
  sharedBackends.emplace(backend, result);
  return result;
}

void removeShared(Backend *backend) {
  for (auto it = sharedBackends.begin(); it != sharedBackends.end(); it++) {
    if (it->second.get() == backend) {
      sharedBackends.erase(it);
      break;
    }
  }

  // Free up memory.
  if (sharedBackends.size() == 0) {
    sharedBackends.rehash(0);
  }
}

void Backend::run() {
#ifndef __wasm32__
  mThread = std::thread([this] () {
    try {
      start();
    } catch (std::exception &err) {
      handleError(err);
    }
  });

  if (mThread.joinable()) {
    mStartedSignal.wait();
  }
#else
  try {
    start();
  } catch (std::exception &err) {
    handleError(err);
  }
#endif
}

void Backend::notifyStarted() {
  mStartedSignal.notify();
}

void Backend::start() {
  notifyStarted();
}

Backend::~Backend() {
#ifndef __wasm32__
  // Wait for thread to stop
  if (mThread.joinable()) {
    // If the backend is being destroyed from the thread itself, detach, otherwise join.
    if (mThread.get_id() == std::this_thread::get_id()) {
      mThread.detach();
    } else {
      mThread.join();
    }
  }
#endif
}

void Backend::watch(WatcherRef watcher) {
  std::unique_lock<std::mutex> lock(mMutex);
  auto res = mSubscriptions.find(watcher);
  if (res == mSubscriptions.end()) {
    try {
      this->subscribe(watcher);
      mSubscriptions.insert(watcher);
    } catch (std::exception &err) {
      unref();
      throw;
    }
  }
}

void Backend::unwatch(WatcherRef watcher) {
  std::unique_lock<std::mutex> lock(mMutex);
  size_t deleted = mSubscriptions.erase(watcher);
  if (deleted > 0) {
    this->unsubscribe(watcher);
    unref();
  }
}

void Backend::unref() {
  if (mSubscriptions.size() == 0) {
    removeShared(this);
  }
}

void Backend::handleWatcherError(WatcherError &err) {
  unwatch(err.mWatcher);
  err.mWatcher->notifyError(err);
}

void Backend::handleError(std::exception &err) {
  std::unique_lock<std::mutex> lock(mMutex);
  for (auto it = mSubscriptions.begin(); it != mSubscriptions.end(); it++) {
    (*it)->notifyError(err);
  }

  removeShared(this);
}
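Backend.cc above resolves the requested backend name to a compiled-in platform backend and, when the request cannot be satisfied, falls back to the `"default"` selection. On the JavaScript side that corresponds to the `backend` field of `Options`; a hedged sketch (directory and backend choice are illustrative only):

```js
// Sketch: requesting a specific backend. If it isn't compiled in or
// available, Backend::getShared above falls back to the platform default.
const watcher = require('@parcel/watcher');

async function example() {
  const subscription = await watcher.subscribe(
    './some-dir',
    (err, events) => {
      if (err) throw err;
      console.log(events);
    },
    { backend: 'watchman' } // 'fs-events' | 'watchman' | 'inotify' | 'windows' | 'brute-force'
  );

  await subscription.unsubscribe();
}

example().catch(console.error);
```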
37  lars/node_modules/@parcel/watcher/src/Backend.hh  generated  vendored  Normal file
@@ -0,0 +1,37 @@
#ifndef BACKEND_H
#define BACKEND_H

#include "Event.hh"
#include "Watcher.hh"
#include "Signal.hh"
#include <thread>

class Backend {
public:
  virtual ~Backend();
  void run();
  void notifyStarted();

  virtual void start();
  virtual void writeSnapshot(WatcherRef watcher, std::string *snapshotPath) = 0;
  virtual void getEventsSince(WatcherRef watcher, std::string *snapshotPath) = 0;
  virtual void subscribe(WatcherRef watcher) = 0;
  virtual void unsubscribe(WatcherRef watcher) = 0;

  static std::shared_ptr<Backend> getShared(std::string backend);

  void watch(WatcherRef watcher);
  void unwatch(WatcherRef watcher);
  void unref();
  void handleWatcherError(WatcherError &err);

  std::mutex mMutex;
  std::thread mThread;
private:
  std::unordered_set<WatcherRef> mSubscriptions;
  Signal mStartedSignal;

  void handleError(std::exception &err);
};

#endif
113  lars/node_modules/@parcel/watcher/src/Debounce.cc  generated  vendored  Normal file
@@ -0,0 +1,113 @@
#include "Debounce.hh"

#ifdef __wasm32__
extern "C" void on_timeout(void *ctx) {
  Debounce *debounce = (Debounce *)ctx;
  debounce->notify();
}
#endif

std::shared_ptr<Debounce> Debounce::getShared() {
  static std::weak_ptr<Debounce> sharedInstance;
  std::shared_ptr<Debounce> shared = sharedInstance.lock();
  if (!shared) {
    shared = std::make_shared<Debounce>();
    sharedInstance = shared;
  }

  return shared;
}

Debounce::Debounce() {
  mRunning = true;
#ifndef __wasm32__
  mThread = std::thread([this] () {
    loop();
  });
#endif
}

Debounce::~Debounce() {
  mRunning = false;
#ifndef __wasm32__
  mWaitSignal.notify();
  mThread.join();
#endif
}

void Debounce::add(void *key, std::function<void()> cb) {
  std::unique_lock<std::mutex> lock(mMutex);
  mCallbacks.emplace(key, cb);
}

void Debounce::remove(void *key) {
  std::unique_lock<std::mutex> lock(mMutex);
  mCallbacks.erase(key);
}

void Debounce::trigger() {
  std::unique_lock<std::mutex> lock(mMutex);
#ifdef __wasm32__
  notifyIfReady();
#else
  mWaitSignal.notify();
#endif
}

#ifndef __wasm32__
void Debounce::loop() {
  while (mRunning) {
    mWaitSignal.wait();
    if (!mRunning) {
      break;
    }

    notifyIfReady();
  }
}
#endif

void Debounce::notifyIfReady() {
  if (!mRunning) {
    return;
  }

  // If we haven't seen an event in more than the maximum wait time, notify callbacks immediately
  // to ensure that we don't wait forever. Otherwise, wait for the minimum wait time and batch
  // subsequent fast changes. This also means the first file change in a batch is notified immediately,
  // separately from the rest of the batch. This seems like an acceptable tradeoff if the common case
  // is that only a single file was updated at a time.
  auto time = std::chrono::steady_clock::now();
  if ((time - mLastTime) > std::chrono::milliseconds(MAX_WAIT_TIME)) {
    mLastTime = time;
    notify();
  } else {
    wait();
  }
}

void Debounce::wait() {
#ifdef __wasm32__
  clear_timeout(mTimeout);
  mTimeout = set_timeout(MIN_WAIT_TIME, this);
#else
  auto status = mWaitSignal.waitFor(std::chrono::milliseconds(MIN_WAIT_TIME));
  if (mRunning && (status == std::cv_status::timeout)) {
    notify();
  }
#endif
}

void Debounce::notify() {
  std::unique_lock<std::mutex> lock(mMutex);

  mLastTime = std::chrono::steady_clock::now();
  for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
    auto cb = it->second;
    cb();
  }

#ifndef __wasm32__
  mWaitSignal.reset();
#endif
}
49  lars/node_modules/@parcel/watcher/src/Debounce.hh  generated  vendored  Normal file
@@ -0,0 +1,49 @@
#ifndef DEBOUNCE_H
#define DEBOUNCE_H

#include <thread>
#include <unordered_map>
#include <functional>
#include "Signal.hh"

#define MIN_WAIT_TIME 50
#define MAX_WAIT_TIME 500

#ifdef __wasm32__
extern "C" {
  int set_timeout(int ms, void *ctx);
  void clear_timeout(int timeout);
  void on_timeout(void *ctx);
};
#endif

class Debounce {
public:
  static std::shared_ptr<Debounce> getShared();

  Debounce();
  ~Debounce();

  void add(void *key, std::function<void()> cb);
  void remove(void *key);
  void trigger();
  void notify();

private:
  bool mRunning;
  std::mutex mMutex;
#ifdef __wasm32__
  int mTimeout;
#else
  Signal mWaitSignal;
  std::thread mThread;
#endif
  std::unordered_map<void *, std::function<void()>> mCallbacks;
  std::chrono::time_point<std::chrono::steady_clock> mLastTime;

  void loop();
  void notifyIfReady();
  void wait();
};

#endif
152  lars/node_modules/@parcel/watcher/src/DirTree.cc  generated  vendored  Normal file
@@ -0,0 +1,152 @@
#include "DirTree.hh"
#include <inttypes.h>

static std::mutex mDirCacheMutex;
static std::unordered_map<std::string, std::weak_ptr<DirTree>> dirTreeCache;

struct DirTreeDeleter {
  void operator()(DirTree *tree) {
    std::lock_guard<std::mutex> lock(mDirCacheMutex);
    dirTreeCache.erase(tree->root);
    delete tree;

    // Free up memory.
    if (dirTreeCache.size() == 0) {
      dirTreeCache.rehash(0);
    }
  }
};

std::shared_ptr<DirTree> DirTree::getCached(std::string root) {
  std::lock_guard<std::mutex> lock(mDirCacheMutex);

  auto found = dirTreeCache.find(root);
  std::shared_ptr<DirTree> tree;

  // Use cached tree, or create an empty one.
  if (found != dirTreeCache.end()) {
    tree = found->second.lock();
  } else {
    tree = std::shared_ptr<DirTree>(new DirTree(root), DirTreeDeleter());
    dirTreeCache.emplace(root, tree);
  }

  return tree;
}

DirTree::DirTree(std::string root, FILE *f) : root(root), isComplete(true) {
  size_t size;
  if (fscanf(f, "%zu", &size)) {
    for (size_t i = 0; i < size; i++) {
      DirEntry entry(f);
      entries.emplace(entry.path, entry);
    }
  }
}

// Internal find method that has no lock
DirEntry *DirTree::_find(std::string path) {
  auto found = entries.find(path);
  if (found == entries.end()) {
    return NULL;
  }

  return &found->second;
}

DirEntry *DirTree::add(std::string path, uint64_t mtime, bool isDir) {
  std::lock_guard<std::mutex> lock(mMutex);

  DirEntry entry(path, mtime, isDir);
  auto it = entries.emplace(entry.path, entry);
  return &it.first->second;
}

DirEntry *DirTree::find(std::string path) {
  std::lock_guard<std::mutex> lock(mMutex);
  return _find(path);
}

DirEntry *DirTree::update(std::string path, uint64_t mtime) {
  std::lock_guard<std::mutex> lock(mMutex);

  DirEntry *found = _find(path);
  if (found) {
    found->mtime = mtime;
  }

  return found;
}

void DirTree::remove(std::string path) {
  std::lock_guard<std::mutex> lock(mMutex);

  DirEntry *found = _find(path);

  // Remove all sub-entries if this is a directory
  if (found && found->isDir) {
    std::string pathStart = path + DIR_SEP;
    for (auto it = entries.begin(); it != entries.end();) {
      if (it->first.rfind(pathStart, 0) == 0) {
        it = entries.erase(it);
      } else {
        it++;
      }
    }
  }

  entries.erase(path);
}

void DirTree::write(FILE *f) {
  std::lock_guard<std::mutex> lock(mMutex);

  fprintf(f, "%zu\n", entries.size());
  for (auto it = entries.begin(); it != entries.end(); it++) {
    it->second.write(f);
  }
}

void DirTree::getChanges(DirTree *snapshot, EventList &events) {
  std::lock_guard<std::mutex> lock(mMutex);
  std::lock_guard<std::mutex> snapshotLock(snapshot->mMutex);

  for (auto it = entries.begin(); it != entries.end(); it++) {
    auto found = snapshot->entries.find(it->first);
    if (found == snapshot->entries.end()) {
      events.create(it->second.path);
    } else if (found->second.mtime != it->second.mtime && !found->second.isDir && !it->second.isDir) {
      events.update(it->second.path);
    }
  }

  for (auto it = snapshot->entries.begin(); it != snapshot->entries.end(); it++) {
    size_t count = entries.count(it->first);
    if (count == 0) {
      events.remove(it->second.path);
    }
  }
}

DirEntry::DirEntry(std::string p, uint64_t t, bool d) {
  path = p;
  mtime = t;
  isDir = d;
  state = NULL;
}

DirEntry::DirEntry(FILE *f) {
  size_t size;
  if (fscanf(f, "%zu", &size)) {
    path.resize(size);
    if (fread(&path[0], sizeof(char), size, f)) {
      int d = 0;
      fscanf(f, "%" PRIu64 " %d\n", &mtime, &d);
      isDir = d == 1;
    }
  }
}

void DirEntry::write(FILE *f) const {
  fprintf(f, "%zu%s%" PRIu64 " %d\n", path.size(), path.c_str(), mtime, isDir);
}
50  lars/node_modules/@parcel/watcher/src/DirTree.hh  generated  vendored  Normal file
@@ -0,0 +1,50 @@
#ifndef DIR_TREE_H
#define DIR_TREE_H

#include <string>
#include <unordered_map>
#include <memory>
#include "Event.hh"

#ifdef _WIN32
#define DIR_SEP "\\"
#else
#define DIR_SEP "/"
#endif

struct DirEntry {
  std::string path;
  uint64_t mtime;
  bool isDir;
  mutable void *state;

  DirEntry(std::string p, uint64_t t, bool d);
  DirEntry(FILE *f);
  void write(FILE *f) const;
  bool operator==(const DirEntry &other) const {
    return path == other.path;
  }
};

class DirTree {
public:
  static std::shared_ptr<DirTree> getCached(std::string root);
  DirTree(std::string root) : root(root), isComplete(false) {}
  DirTree(std::string root, FILE *f);
  DirEntry *add(std::string path, uint64_t mtime, bool isDir);
  DirEntry *find(std::string path);
  DirEntry *update(std::string path, uint64_t mtime);
  void remove(std::string path);
  void write(FILE *f);
  void getChanges(DirTree *snapshot, EventList &events);

  std::mutex mMutex;
  std::string root;
  bool isComplete;
  std::unordered_map<std::string, DirEntry> entries;

private:
  DirEntry *_find(std::string path);
};

#endif
109  lars/node_modules/@parcel/watcher/src/Event.hh  generated  vendored  Normal file
@@ -0,0 +1,109 @@
#ifndef EVENT_H
#define EVENT_H

#include <string>
#include <node_api.h>
#include "wasm/include.h"
#include <napi.h>
#include <mutex>
#include <map>
#include <optional>

using namespace Napi;

struct Event {
  std::string path;
  bool isCreated;
  bool isDeleted;
  Event(std::string path) : path(path), isCreated(false), isDeleted(false) {}

  Value toJS(const Env& env) {
    EscapableHandleScope scope(env);
    Object res = Object::New(env);
    std::string type = isCreated ? "create" : isDeleted ? "delete" : "update";
    res.Set(String::New(env, "path"), String::New(env, path.c_str()));
    res.Set(String::New(env, "type"), String::New(env, type.c_str()));
    return scope.Escape(res);
  }
};

class EventList {
public:
  void create(std::string path) {
    std::lock_guard<std::mutex> l(mMutex);
    Event *event = internalUpdate(path);
    if (event->isDeleted) {
      // Assume update event when rapidly removed and created
      // https://github.com/parcel-bundler/watcher/issues/72
      event->isDeleted = false;
    } else {
      event->isCreated = true;
    }
  }

  Event *update(std::string path) {
    std::lock_guard<std::mutex> l(mMutex);
    return internalUpdate(path);
  }

  void remove(std::string path) {
    std::lock_guard<std::mutex> l(mMutex);
    Event *event = internalUpdate(path);
    event->isDeleted = true;
  }

  size_t size() {
    std::lock_guard<std::mutex> l(mMutex);
    return mEvents.size();
  }

  std::vector<Event> getEvents() {
    std::lock_guard<std::mutex> l(mMutex);
    std::vector<Event> eventsCloneVector;
    for(auto it = mEvents.begin(); it != mEvents.end(); ++it) {
      if (!(it->second.isCreated && it->second.isDeleted)) {
        eventsCloneVector.push_back(it->second);
      }
    }
    return eventsCloneVector;
  }

  void clear() {
    std::lock_guard<std::mutex> l(mMutex);
    mEvents.clear();
    mError.reset();
  }

  void error(std::string err) {
    std::lock_guard<std::mutex> l(mMutex);
    if (!mError.has_value()) {
      mError.emplace(err);
    }
  }

  bool hasError() {
    std::lock_guard<std::mutex> l(mMutex);
    return mError.has_value();
  }

  std::string getError() {
    std::lock_guard<std::mutex> l(mMutex);
    return mError.value_or("");
  }

private:
  mutable std::mutex mMutex;
  std::map<std::string, Event> mEvents;
  std::optional<std::string> mError;
  Event *internalUpdate(std::string path) {
    auto found = mEvents.find(path);
    if (found == mEvents.end()) {
      auto it = mEvents.emplace(path, Event(path));
      return &it.first->second;
    }

    return &found->second;
  }
};

#endif
22  lars/node_modules/@parcel/watcher/src/Glob.cc  generated  vendored  Normal file
@@ -0,0 +1,22 @@
#include "Glob.hh"

#ifdef __wasm32__
extern "C" bool wasm_regex_match(const char *s, const char *regex);
#endif

Glob::Glob(std::string raw) {
  mRaw = raw;
  mHash = std::hash<std::string>()(raw);
#ifndef __wasm32__
  mRegex = std::regex(raw);
#endif
}

bool Glob::isIgnored(std::string relative_path) const {
  // Use native JS regex engine for wasm to reduce binary size.
#ifdef __wasm32__
  return wasm_regex_match(relative_path.c_str(), mRaw.c_str());
#else
  return std::regex_match(relative_path, mRegex);
#endif
}
34  lars/node_modules/@parcel/watcher/src/Glob.hh  generated  vendored  Normal file
@@ -0,0 +1,34 @@
#ifndef GLOB_H
#define GLOB_H

#include <unordered_set>
#include <regex>

struct Glob {
  std::size_t mHash;
  std::string mRaw;
#ifndef __wasm32__
  std::regex mRegex;
#endif

  Glob(std::string raw);

  bool operator==(const Glob &other) const {
    return mHash == other.mHash;
  }

  bool isIgnored(std::string relative_path) const;
};

namespace std
{
  template <>
  struct hash<Glob>
  {
    size_t operator()(const Glob& g) const {
      return g.mHash;
    }
  };
}

#endif
101  lars/node_modules/@parcel/watcher/src/PromiseRunner.hh  generated  vendored  Normal file
@@ -0,0 +1,101 @@
#ifndef PROMISE_RUNNER_H
#define PROMISE_RUNNER_H

#include <node_api.h>
#include "wasm/include.h"
#include <napi.h>

using namespace Napi;

class PromiseRunner {
public:
  const Env env;
  Promise::Deferred deferred;

  PromiseRunner(Env env) : env(env), deferred(Promise::Deferred::New(env)) {
    napi_status status = napi_create_async_work(env, nullptr, env.Undefined(),
                                                 onExecute, onWorkComplete, this, &work);
    if (status != napi_ok) {
      work = nullptr;
      const napi_extended_error_info *error_info = 0;
      napi_get_last_error_info(env, &error_info);
      if (error_info->error_message) {
        Error::New(env, error_info->error_message).ThrowAsJavaScriptException();
      } else {
        Error::New(env).ThrowAsJavaScriptException();
      }
    }
  }

  virtual ~PromiseRunner() {}

  Value queue() {
    if (work) {
      napi_status status = napi_queue_async_work(env, work);
      if (status != napi_ok) {
        onError(Error::New(env));
      }
    }

    return deferred.Promise();
  }

private:
  napi_async_work work;
  std::string error;

  static void onExecute(napi_env env, void *this_pointer) {
    PromiseRunner* self = (PromiseRunner*) this_pointer;
    try {
      self->execute();
    } catch (std::exception &err) {
      self->error = err.what();
    }
  }

  static void onWorkComplete(napi_env env, napi_status status, void *this_pointer) {
    PromiseRunner* self = (PromiseRunner*) this_pointer;
    if (status != napi_cancelled) {
      HandleScope scope(self->env);
      if (status == napi_ok) {
        status = napi_delete_async_work(self->env, self->work);
        if (status == napi_ok) {
          if (self->error.size() == 0) {
            self->onOK();
          } else {
            self->onError(Error::New(self->env, self->error));
          }
          delete self;
          return;
        }
      }
    }

    // fallthrough for error handling
    const napi_extended_error_info *error_info = 0;
    napi_get_last_error_info(env, &error_info);
    if (error_info->error_message){
      self->onError(Error::New(env, error_info->error_message));
    } else {
      self->onError(Error::New(env));
    }
    delete self;
  }

  virtual void execute() {}
  virtual Value getResult() {
    return env.Null();
  }

  void onOK() {
    HandleScope scope(env);
    Value result = getResult();
    deferred.Resolve(result);
  }

  void onError(const Error &e) {
    deferred.Reject(e.Value());
  }
};

#endif
46  lars/node_modules/@parcel/watcher/src/Signal.hh  generated  vendored  Normal file
@@ -0,0 +1,46 @@
#ifndef SIGNAL_H
#define SIGNAL_H

#include <mutex>
#include <condition_variable>

class Signal {
public:
  Signal() : mFlag(false), mWaiting(false) {}
  void wait() {
    std::unique_lock<std::mutex> lock(mMutex);
    while (!mFlag) {
      mWaiting = true;
      mCond.wait(lock);
    }
  }

  std::cv_status waitFor(std::chrono::milliseconds ms) {
    std::unique_lock<std::mutex> lock(mMutex);
    return mCond.wait_for(lock, ms);
  }

  void notify() {
    std::unique_lock<std::mutex> lock(mMutex);
    mFlag = true;
    mCond.notify_all();
  }

  void reset() {
    std::unique_lock<std::mutex> lock(mMutex);
    mFlag = false;
    mWaiting = false;
  }

  bool isWaiting() {
    return mWaiting;
  }

private:
  bool mFlag;
  bool mWaiting;
  std::mutex mMutex;
  std::condition_variable mCond;
};

#endif
237  lars/node_modules/@parcel/watcher/src/Watcher.cc  generated  vendored  Normal file
@@ -0,0 +1,237 @@
#include "Watcher.hh"
#include <unordered_set>

using namespace Napi;

struct WatcherHash {
  std::size_t operator() (WatcherRef const &k) const {
    return std::hash<std::string>()(k->mDir);
  }
};

struct WatcherCompare {
  size_t operator() (WatcherRef const &a, WatcherRef const &b) const {
    return *a == *b;
  }
};

static std::unordered_set<WatcherRef, WatcherHash, WatcherCompare> sharedWatchers;

WatcherRef Watcher::getShared(std::string dir, std::unordered_set<std::string> ignorePaths, std::unordered_set<Glob> ignoreGlobs) {
  WatcherRef watcher = std::make_shared<Watcher>(dir, ignorePaths, ignoreGlobs);
  auto found = sharedWatchers.find(watcher);
  if (found != sharedWatchers.end()) {
    return *found;
  }

  sharedWatchers.insert(watcher);
  return watcher;
}

void removeShared(Watcher *watcher) {
  for (auto it = sharedWatchers.begin(); it != sharedWatchers.end(); it++) {
    if (it->get() == watcher) {
      sharedWatchers.erase(it);
      break;
    }
  }

  // Free up memory.
  if (sharedWatchers.size() == 0) {
    sharedWatchers.rehash(0);
  }
}

Watcher::Watcher(std::string dir, std::unordered_set<std::string> ignorePaths, std::unordered_set<Glob> ignoreGlobs)
  : mDir(dir),
    mIgnorePaths(ignorePaths),
    mIgnoreGlobs(ignoreGlobs) {
  mDebounce = Debounce::getShared();
  mDebounce->add(this, [this] () {
    triggerCallbacks();
  });
}

Watcher::~Watcher() {
  mDebounce->remove(this);
}

void Watcher::wait() {
  std::unique_lock<std::mutex> lk(mMutex);
  mCond.wait(lk);
}

void Watcher::notify() {
  std::unique_lock<std::mutex> lk(mMutex);
  mCond.notify_all();

  if (mCallbacks.size() > 0 && mEvents.size() > 0) {
    // We must release our lock before calling into the debouncer
    // to avoid a deadlock: the debouncer thread itself will require
    // our lock from its thread when calling into `triggerCallbacks`
    // while holding its own debouncer lock.
    lk.unlock();
    mDebounce->trigger();
  }
}

struct CallbackData {
  std::string error;
  std::vector<Event> events;
  CallbackData(std::string error, std::vector<Event> events) : error(error), events(events) {}
};

Value callbackEventsToJS(const Env &env, std::vector<Event> &events) {
  EscapableHandleScope scope(env);
  Array arr = Array::New(env, events.size());
  size_t currentEventIndex = 0;
  for (auto eventIterator = events.begin(); eventIterator != events.end(); eventIterator++) {
    arr.Set(currentEventIndex++, eventIterator->toJS(env));
  }
  return scope.Escape(arr);
}

void callJSFunction(Napi::Env env, Function jsCallback, CallbackData *data) {
  HandleScope scope(env);
  auto err = data->error.size() > 0 ? Error::New(env, data->error).Value() : env.Null();
  auto events = callbackEventsToJS(env, data->events);
  jsCallback.Call({err, events});
  delete data;

  // Throw errors from the callback as fatal exceptions
  // If we don't handle these node segfaults...
  if (env.IsExceptionPending()) {
    Napi::Error err = env.GetAndClearPendingException();
    napi_fatal_exception(env, err.Value());
  }
}

void Watcher::notifyError(std::exception &err) {
  std::unique_lock<std::mutex> lk(mMutex);
  for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
    CallbackData *data = new CallbackData(err.what(), {});
    it->tsfn.BlockingCall(data, callJSFunction);
  }

  clearCallbacks();
}

// This function is called from the debounce thread.
void Watcher::triggerCallbacks() {
  std::unique_lock<std::mutex> lk(mMutex);
  if (mCallbacks.size() > 0 && (mEvents.size() > 0 || mEvents.hasError())) {
    auto error = mEvents.getError();
    auto events = mEvents.getEvents();
    mEvents.clear();

    for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
      it->tsfn.BlockingCall(new CallbackData(error, events), callJSFunction);
    }
  }
}

// This should be called from the JavaScript thread.
bool Watcher::watch(Function callback) {
  std::unique_lock<std::mutex> lk(mMutex);

  auto it = findCallback(callback);
  if (it != mCallbacks.end()) {
    return false;
  }

  auto tsfn = ThreadSafeFunction::New(
    callback.Env(),
    callback,
    "Watcher callback",
    0, // Unlimited queue
    1 // Initial thread count
  );

  mCallbacks.push_back(Callback {
    tsfn,
    Napi::Persistent(callback),
    std::this_thread::get_id()
  });

  return true;
}

// This should be called from the JavaScript thread.
std::vector<Callback>::iterator Watcher::findCallback(Function callback) {
  for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
    // Only consider callbacks created by the same thread, or V8 will panic.
    if (it->threadId == std::this_thread::get_id() && it->ref.Value() == callback) {
      return it;
    }
  }

  return mCallbacks.end();
}

// This should be called from the JavaScript thread.
bool Watcher::unwatch(Function callback) {
  std::unique_lock<std::mutex> lk(mMutex);

  bool removed = false;
  auto it = findCallback(callback);
  if (it != mCallbacks.end()) {
    it->tsfn.Release();
    it->ref.Unref();
    mCallbacks.erase(it);
    removed = true;
  }

  if (removed && mCallbacks.size() == 0) {
    unref();
    return true;
  }

  return false;
}

void Watcher::unref() {
  if (mCallbacks.size() == 0) {
    removeShared(this);
  }
}

void Watcher::destroy() {
  std::unique_lock<std::mutex> lk(mMutex);
  clearCallbacks();
}

// Private because it doesn't lock.
void Watcher::clearCallbacks() {
  for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
    it->tsfn.Release();
    it->ref.Unref();
  }

  mCallbacks.clear();
  unref();
}

bool Watcher::isIgnored(std::string path) {
  for (auto it = mIgnorePaths.begin(); it != mIgnorePaths.end(); it++) {
    auto dir = *it + DIR_SEP;
    if (*it == path || path.compare(0, dir.size(), dir) == 0) {
      return true;
    }
  }

  auto basePath = mDir + DIR_SEP;

  if (path.rfind(basePath, 0) != 0) {
    return false;
  }

  auto relativePath = path.substr(basePath.size());

  for (auto it = mIgnoreGlobs.begin(); it != mIgnoreGlobs.end(); it++) {
    if (it->isIgnored(relativePath)) {
      return true;
    }
  }

  return false;
}
73  lars/node_modules/@parcel/watcher/src/Watcher.hh  generated  vendored  Normal file
@@ -0,0 +1,73 @@
#ifndef WATCHER_H
#define WATCHER_H

#include <condition_variable>
#include <unordered_set>
#include <set>
#include <node_api.h>
#include "Glob.hh"
#include "Event.hh"
#include "Debounce.hh"
#include "DirTree.hh"
#include "Signal.hh"

using namespace Napi;

struct Watcher;
using WatcherRef = std::shared_ptr<Watcher>;

struct Callback {
  Napi::ThreadSafeFunction tsfn;
  Napi::FunctionReference ref;
  std::thread::id threadId;
};

class WatcherState {
public:
  virtual ~WatcherState() = default;
};

struct Watcher {
  std::string mDir;
  std::unordered_set<std::string> mIgnorePaths;
  std::unordered_set<Glob> mIgnoreGlobs;
  EventList mEvents;
  std::shared_ptr<WatcherState> state;

  Watcher(std::string dir, std::unordered_set<std::string> ignorePaths, std::unordered_set<Glob> ignoreGlobs);
  ~Watcher();

  bool operator==(const Watcher &other) const {
    return mDir == other.mDir && mIgnorePaths == other.mIgnorePaths && mIgnoreGlobs == other.mIgnoreGlobs;
  }

  void wait();
  void notify();
  void notifyError(std::exception &err);
  bool watch(Function callback);
  bool unwatch(Function callback);
  void unref();
  bool isIgnored(std::string path);
  void destroy();

  static WatcherRef getShared(std::string dir, std::unordered_set<std::string> ignorePaths, std::unordered_set<Glob> ignoreGlobs);

private:
  std::mutex mMutex;
  std::condition_variable mCond;
  std::vector<Callback> mCallbacks;
  std::shared_ptr<Debounce> mDebounce;

  std::vector<Callback>::iterator findCallback(Function callback);
  void clearCallbacks();
  void triggerCallbacks();
};

class WatcherError : public std::runtime_error {
public:
  WatcherRef mWatcher;
  WatcherError(std::string msg, WatcherRef watcher) : std::runtime_error(msg), mWatcher(watcher) {}
  WatcherError(const char *msg, WatcherRef watcher) : std::runtime_error(msg), mWatcher(watcher) {}
};

#endif
268  lars/node_modules/@parcel/watcher/src/binding.cc  generated  vendored  Normal file
@@ -0,0 +1,268 @@
#include <unordered_set>
#include <node_api.h>
#include "wasm/include.h"
#include <napi.h>
#include "Glob.hh"
#include "Event.hh"
#include "Backend.hh"
#include "Watcher.hh"
#include "PromiseRunner.hh"

using namespace Napi;

std::unordered_set<std::string> getIgnorePaths(Env env, Value opts) {
  std::unordered_set<std::string> result;

  if (opts.IsObject()) {
    Value v = opts.As<Object>().Get(String::New(env, "ignorePaths"));
    if (v.IsArray()) {
      Array items = v.As<Array>();
      for (size_t i = 0; i < items.Length(); i++) {
        Value item = items.Get(Number::New(env, i));
        if (item.IsString()) {
          result.insert(std::string(item.As<String>().Utf8Value().c_str()));
        }
      }
    }
  }

  return result;
}

std::unordered_set<Glob> getIgnoreGlobs(Env env, Value opts) {
  std::unordered_set<Glob> result;

  if (opts.IsObject()) {
    Value v = opts.As<Object>().Get(String::New(env, "ignoreGlobs"));
    if (v.IsArray()) {
      Array items = v.As<Array>();
      for (size_t i = 0; i < items.Length(); i++) {
        Value item = items.Get(Number::New(env, i));
        if (item.IsString()) {
          auto key = item.As<String>().Utf8Value();
          try {
            result.emplace(key);
          } catch (const std::regex_error& e) {
            Error::New(env, e.what()).ThrowAsJavaScriptException();
          }
        }
      }
    }
  }

  return result;
}

std::shared_ptr<Backend> getBackend(Env env, Value opts) {
  Value b = opts.As<Object>().Get(String::New(env, "backend"));
  std::string backendName;
  if (b.IsString()) {
    backendName = std::string(b.As<String>().Utf8Value().c_str());
  }

  return Backend::getShared(backendName);
}

class WriteSnapshotRunner : public PromiseRunner {
public:
  WriteSnapshotRunner(Env env, Value dir, Value snap, Value opts)
    : PromiseRunner(env),
      snapshotPath(std::string(snap.As<String>().Utf8Value().c_str())) {
    watcher = Watcher::getShared(
      std::string(dir.As<String>().Utf8Value().c_str()),
      getIgnorePaths(env, opts),
      getIgnoreGlobs(env, opts)
    );

    backend = getBackend(env, opts);
  }

  ~WriteSnapshotRunner() {
    watcher->unref();
    backend->unref();
  }
private:
  std::shared_ptr<Backend> backend;
  WatcherRef watcher;
  std::string snapshotPath;

  void execute() override {
    backend->writeSnapshot(watcher, &snapshotPath);
  }
};

class GetEventsSinceRunner : public PromiseRunner {
public:
  GetEventsSinceRunner(Env env, Value dir, Value snap, Value opts)
    : PromiseRunner(env),
      snapshotPath(std::string(snap.As<String>().Utf8Value().c_str())) {
    watcher = std::make_shared<Watcher>(
      std::string(dir.As<String>().Utf8Value().c_str()),
      getIgnorePaths(env, opts),
      getIgnoreGlobs(env, opts)
    );

    backend = getBackend(env, opts);
  }

  ~GetEventsSinceRunner() {
    watcher->unref();
    backend->unref();
  }
private:
  std::shared_ptr<Backend> backend;
  WatcherRef watcher;
  std::string snapshotPath;

  void execute() override {
    backend->getEventsSince(watcher, &snapshotPath);
    if (watcher->mEvents.hasError()) {
      throw std::runtime_error(watcher->mEvents.getError());
    }
  }

  Value getResult() override {
    std::vector<Event> events = watcher->mEvents.getEvents();
    Array eventsArray = Array::New(env, events.size());
    size_t i = 0;
    for (auto it = events.begin(); it != events.end(); it++) {
      eventsArray.Set(i++, it->toJS(env));
    }
    return eventsArray;
  }
};

template<class Runner>
Value queueSnapshotWork(const CallbackInfo& info) {
  Env env = info.Env();
  if (info.Length() < 1 || !info[0].IsString()) {
    TypeError::New(env, "Expected a string").ThrowAsJavaScriptException();
    return env.Null();
  }

  if (info.Length() < 2 || !info[1].IsString()) {
    TypeError::New(env, "Expected a string").ThrowAsJavaScriptException();
    return env.Null();
  }

  if (info.Length() >= 3 && !info[2].IsObject()) {
    TypeError::New(env, "Expected an object").ThrowAsJavaScriptException();
    return env.Null();
  }

  Runner *runner = new Runner(info.Env(), info[0], info[1], info[2]);
  return runner->queue();
}

Value writeSnapshot(const CallbackInfo& info) {
  return queueSnapshotWork<WriteSnapshotRunner>(info);
}

Value getEventsSince(const CallbackInfo& info) {
  return queueSnapshotWork<GetEventsSinceRunner>(info);
}

class SubscribeRunner : public PromiseRunner {
public:
  SubscribeRunner(Env env, Value dir, Value fn, Value opts) : PromiseRunner(env) {
    watcher = Watcher::getShared(
      std::string(dir.As<String>().Utf8Value().c_str()),
      getIgnorePaths(env, opts),
      getIgnoreGlobs(env, opts)
    );

    backend = getBackend(env, opts);
    watcher->watch(fn.As<Function>());
  }

private:
  WatcherRef watcher;
  std::shared_ptr<Backend> backend;
  FunctionReference callback;

  void execute() override {
    try {
      backend->watch(watcher);
    } catch (std::exception &err) {
      watcher->destroy();
      throw;
    }
  }
};

class UnsubscribeRunner : public PromiseRunner {
public:
  UnsubscribeRunner(Env env, Value dir, Value fn, Value opts) : PromiseRunner(env) {
    watcher = Watcher::getShared(
      std::string(dir.As<String>().Utf8Value().c_str()),
      getIgnorePaths(env, opts),
      getIgnoreGlobs(env, opts)
    );

    backend = getBackend(env, opts);
    shouldUnwatch = watcher->unwatch(fn.As<Function>());
  }

private:
  WatcherRef watcher;
  std::shared_ptr<Backend> backend;
  bool shouldUnwatch;

  void execute() override {
    if (shouldUnwatch) {
      backend->unwatch(watcher);
    }
  }
};

template<class Runner>
Value queueSubscriptionWork(const CallbackInfo& info) {
  Env env = info.Env();
  if (info.Length() < 1 || !info[0].IsString()) {
    TypeError::New(env, "Expected a string").ThrowAsJavaScriptException();
    return env.Null();
  }

  if (info.Length() < 2 || !info[1].IsFunction()) {
    TypeError::New(env, "Expected a function").ThrowAsJavaScriptException();
    return env.Null();
  }

  if (info.Length() >= 3 && !info[2].IsObject()) {
    TypeError::New(env, "Expected an object").ThrowAsJavaScriptException();
    return env.Null();
  }

  Runner *runner = new Runner(info.Env(), info[0], info[1], info[2]);
  return runner->queue();
}

Value subscribe(const CallbackInfo& info) {
  return queueSubscriptionWork<SubscribeRunner>(info);
}

Value unsubscribe(const CallbackInfo& info) {
  return queueSubscriptionWork<UnsubscribeRunner>(info);
}

Object Init(Env env, Object exports) {
  exports.Set(
    String::New(env, "writeSnapshot"),
    Function::New(env, writeSnapshot)
  );
  exports.Set(
    String::New(env, "getEventsSince"),
    Function::New(env, getEventsSince)
  );
  exports.Set(
    String::New(env, "subscribe"),
    Function::New(env, subscribe)
  );
  exports.Set(
    String::New(env, "unsubscribe"),
    Function::New(env, unsubscribe)
  );
  return exports;
}

NODE_API_MODULE(watcher, Init)
306  lars/node_modules/@parcel/watcher/src/kqueue/KqueueBackend.cc  generated  vendored  Normal file
@@ -0,0 +1,306 @@
#include <memory>
#include <poll.h>
#include <unistd.h>
#include <libgen.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/stat.h>
#include "KqueueBackend.hh"

#if __APPLE__
#define st_mtim st_mtimespec
#endif

#if !defined(O_EVTONLY)
#define O_EVTONLY O_RDONLY
#endif

#define CONVERT_TIME(ts) ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec)

void KqueueBackend::start() {
  if ((mKqueue = kqueue()) < 0) {
    throw std::runtime_error(std::string("Unable to open kqueue: ") + strerror(errno));
  }

  // Create a pipe that we will write to when we want to end the thread.
  int err = pipe(mPipe);
  if (err == -1) {
    throw std::runtime_error(std::string("Unable to open pipe: ") + strerror(errno));
  }

  // Subscribe kqueue to this pipe.
  struct kevent ev;
  EV_SET(
    &ev,
    mPipe[0],
    EVFILT_READ,
    EV_ADD | EV_CLEAR,
    0,
    0,
    0
  );

  if (kevent(mKqueue, &ev, 1, NULL, 0, 0)) {
    close(mPipe[0]);
    close(mPipe[1]);
    throw std::runtime_error(std::string("Unable to watch pipe: ") + strerror(errno));
  }

  notifyStarted();

  struct kevent events[128];

  while (true) {
    int event_count = kevent(mKqueue, NULL, 0, events, 128, 0);
    if (event_count < 0 || events[0].flags == EV_ERROR) {
      throw std::runtime_error(std::string("kevent error: ") + strerror(errno));
    }

    // Track all of the watchers that are touched so we can notify them at the end of the events.
    std::unordered_set<WatcherRef> watchers;

    for (int i = 0; i < event_count; i++) {
      int flags = events[i].fflags;
      int fd = events[i].ident;
      if (fd == mPipe[0]) {
        // pipe was written to. break out of the loop.
        goto done;
      }

      auto it = mFdToEntry.find(fd);
      if (it == mFdToEntry.end()) {
        // If fd wasn't in our map, we may have already stopped watching it. Ignore the event.
        continue;
      }

      DirEntry *entry = it->second;

      if (flags & NOTE_WRITE && entry && entry->isDir) {
        // If a write occurred on a directory, we have to diff the contents of that
        // directory to determine what file was added/deleted.
        compareDir(fd, entry->path, watchers);
      } else {
        std::vector<KqueueSubscription *> subs = findSubscriptions(entry->path);
        for (auto it = subs.begin(); it != subs.end(); it++) {
          KqueueSubscription *sub = *it;
          watchers.insert(sub->watcher);
          if (flags & (NOTE_DELETE | NOTE_RENAME | NOTE_REVOKE)) {
            sub->watcher->mEvents.remove(sub->path);
            sub->tree->remove(sub->path);
            mFdToEntry.erase((int)(size_t)entry->state);
            mSubscriptions.erase(sub->path);
          } else if (flags & (NOTE_WRITE | NOTE_ATTRIB | NOTE_EXTEND)) {
            struct stat st;
            lstat(sub->path.c_str(), &st);
            if (entry->mtime != CONVERT_TIME(st.st_mtim)) {
              entry->mtime = CONVERT_TIME(st.st_mtim);
              sub->watcher->mEvents.update(sub->path);
            }
          }
        }
      }
    }

    for (auto it = watchers.begin(); it != watchers.end(); it++) {
      (*it)->notify();
    }
  }

done:
  close(mPipe[0]);
  close(mPipe[1]);
  mEndedSignal.notify();
}

KqueueBackend::~KqueueBackend() {
  write(mPipe[1], "X", 1);
  mEndedSignal.wait();
}

void KqueueBackend::subscribe(WatcherRef watcher) {
  // Build a full directory tree recursively, and watch each directory.
  std::shared_ptr<DirTree> tree = getTree(watcher);

  for (auto it = tree->entries.begin(); it != tree->entries.end(); it++) {
    bool success = watchDir(watcher, it->second.path, tree);
    if (!success) {
      throw WatcherError(std::string("error watching " + watcher->mDir + ": " + strerror(errno)), watcher);
    }
  }
}

bool KqueueBackend::watchDir(WatcherRef watcher, std::string path, std::shared_ptr<DirTree> tree) {
  if (watcher->isIgnored(path)) {
    return false;
  }

  DirEntry *entry = tree->find(path);
  if (!entry) {
    return false;
  }

  KqueueSubscription sub = {
    .watcher = watcher,
    .path = path,
    .tree = tree
  };

  if (!entry->state) {
    int fd = open(path.c_str(), O_EVTONLY);
    if (fd <= 0) {
      return false;
    }

    struct kevent event;
    EV_SET(
      &event,
      fd,
      EVFILT_VNODE,
      EV_ADD | EV_CLEAR | EV_ENABLE,
      NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | NOTE_RENAME | NOTE_REVOKE,
      0,
      0
    );

    if (kevent(mKqueue, &event, 1, NULL, 0, 0)) {
      close(fd);
      return false;
    }

    entry->state = (void *)(size_t)fd;
    mFdToEntry.emplace(fd, entry);
  }

  sub.fd = (int)(size_t)entry->state;
  mSubscriptions.emplace(path, sub);
  return true;
}

std::vector<KqueueSubscription *> KqueueBackend::findSubscriptions(std::string &path) {
  // Find the subscriptions affected by this path.
  // Copy pointers to them into a vector so that modifying mSubscriptions doesn't invalidate the iterator.
  auto range = mSubscriptions.equal_range(path);
  std::vector<KqueueSubscription *> subs;
  for (auto it = range.first; it != range.second; it++) {
    subs.push_back(&it->second);
  }

  return subs;
}

bool KqueueBackend::compareDir(int fd, std::string &path, std::unordered_set<WatcherRef> &watchers) {
  // macOS doesn't support fdclosedir, so we have to duplicate the file descriptor
  // to ensure the closedir doesn't also stop watching.
#if __APPLE__
  fd = dup(fd);
#endif

  DIR *dir = fdopendir(fd);
  if (dir == NULL) {
    return false;
  }

  // fdopendir doesn't rewind to the beginning.
  rewinddir(dir);

  std::vector<KqueueSubscription *> subs = findSubscriptions(path);
  std::string dirStart = path + DIR_SEP;

  std::unordered_set<std::shared_ptr<DirTree>> trees;
  for (auto it = subs.begin(); it != subs.end(); it++) {
    trees.emplace((*it)->tree);
  }

  std::unordered_set<std::string> entries;
  struct dirent *entry;
  while ((entry = readdir(dir))) {
    if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
      continue;
    }

    std::string fullpath = dirStart + entry->d_name;
    entries.emplace(fullpath);

    for (auto it = trees.begin(); it != trees.end(); it++) {
      std::shared_ptr<DirTree> tree = *it;
      if (!tree->find(fullpath)) {
        struct stat st;
        fstatat(fd, entry->d_name, &st, AT_SYMLINK_NOFOLLOW);
        tree->add(fullpath, CONVERT_TIME(st.st_mtim), S_ISDIR(st.st_mode));

        // Notify all watchers with the same tree.
        for (auto i = subs.begin(); i != subs.end(); i++) {
          KqueueSubscription *sub = *i;
          if (sub->tree == tree) {
            if (sub->watcher->isIgnored(fullpath)) {
              continue;
            }

            sub->watcher->mEvents.create(fullpath);
            watchers.emplace(sub->watcher);

            bool success = watchDir(sub->watcher, fullpath, sub->tree);
            if (!success) {
              sub->tree->remove(fullpath);
              return false;
            }
          }
        }
      }
    }
  }

  for (auto it = trees.begin(); it != trees.end(); it++) {
    std::shared_ptr<DirTree> tree = *it;
    for (auto entry = tree->entries.begin(); entry != tree->entries.end();) {

      if (
        entry->first.rfind(dirStart, 0) == 0 &&
        entry->first.find(DIR_SEP, dirStart.length()) == std::string::npos &&
        entries.count(entry->first) == 0
      ) {
        // Notify all watchers with the same tree.
        for (auto i = subs.begin(); i != subs.end(); i++) {
          if ((*i)->tree == tree) {
            KqueueSubscription *sub = *i;
            if (!sub->watcher->isIgnored(entry->first)) {
              sub->watcher->mEvents.remove(entry->first);
              watchers.emplace(sub->watcher);
            }
          }
        }

        mFdToEntry.erase((int)(size_t)entry->second.state);
        mSubscriptions.erase(entry->first);
        entry = tree->entries.erase(entry);
      } else {
        entry++;
      }
    }
  }

#if __APPLE__
  closedir(dir);
#else
  fdclosedir(dir);
#endif

  return true;
}

void KqueueBackend::unsubscribe(WatcherRef watcher) {
  // Find any subscriptions pointing to this watcher, and remove them.
  for (auto it = mSubscriptions.begin(); it != mSubscriptions.end();) {
    if (it->second.watcher.get() == watcher.get()) {
      if (mSubscriptions.count(it->first) == 1) {
        // Closing the file descriptor automatically unwatches it in the kqueue.
        close(it->second.fd);
        mFdToEntry.erase(it->second.fd);
      }

      it = mSubscriptions.erase(it);
    } else {
      it++;
    }
  }
}
35 lars/node_modules/@parcel/watcher/src/kqueue/KqueueBackend.hh generated vendored Normal file
@@ -0,0 +1,35 @@
#ifndef KQUEUE_H
#define KQUEUE_H

#include <unordered_map>
#include <sys/event.h>
#include "../shared/BruteForceBackend.hh"
#include "../DirTree.hh"
#include "../Signal.hh"

struct KqueueSubscription {
  WatcherRef watcher;
  std::string path;
  std::shared_ptr<DirTree> tree;
  int fd;
};

class KqueueBackend : public BruteForceBackend {
public:
  void start() override;
  ~KqueueBackend();
  void subscribe(WatcherRef watcher) override;
  void unsubscribe(WatcherRef watcher) override;
private:
  int mKqueue;
  int mPipe[2];
  std::unordered_multimap<std::string, KqueueSubscription> mSubscriptions;
  std::unordered_map<int, DirEntry *> mFdToEntry;
  Signal mEndedSignal;

  bool watchDir(WatcherRef watcher, std::string path, std::shared_ptr<DirTree> tree);
  bool compareDir(int fd, std::string &dir, std::unordered_set<WatcherRef> &watchers);
  std::vector<KqueueSubscription *> findSubscriptions(std::string &path);
};

#endif
232 lars/node_modules/@parcel/watcher/src/linux/InotifyBackend.cc generated vendored Normal file
@@ -0,0 +1,232 @@
|
||||
#include <memory>
|
||||
#include <poll.h>
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
#include <sys/stat.h>
|
||||
#include "InotifyBackend.hh"
|
||||
|
||||
#define INOTIFY_MASK \
|
||||
IN_ATTRIB | IN_CREATE | IN_DELETE | \
|
||||
IN_DELETE_SELF | IN_MODIFY | IN_MOVE_SELF | IN_MOVED_FROM | \
|
||||
IN_MOVED_TO | IN_DONT_FOLLOW | IN_ONLYDIR | IN_EXCL_UNLINK
|
||||
#define BUFFER_SIZE 8192
|
||||
#define CONVERT_TIME(ts) ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec)
|
||||
|
||||
void InotifyBackend::start() {
|
||||
// Create a pipe that we will write to when we want to end the thread.
|
||||
int err = pipe2(mPipe, O_CLOEXEC | O_NONBLOCK);
|
||||
if (err == -1) {
|
||||
throw std::runtime_error(std::string("Unable to open pipe: ") + strerror(errno));
|
||||
}
|
||||
|
||||
// Init inotify file descriptor.
|
||||
mInotify = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
|
||||
if (mInotify == -1) {
|
||||
throw std::runtime_error(std::string("Unable to initialize inotify: ") + strerror(errno));
|
||||
}
|
||||
|
||||
pollfd pollfds[2];
|
||||
pollfds[0].fd = mPipe[0];
|
||||
pollfds[0].events = POLLIN;
|
||||
pollfds[0].revents = 0;
|
||||
pollfds[1].fd = mInotify;
|
||||
pollfds[1].events = POLLIN;
|
||||
pollfds[1].revents = 0;
|
||||
|
||||
notifyStarted();
|
||||
|
||||
// Loop until we get an event from the pipe.
|
||||
while (true) {
|
||||
int result = poll(pollfds, 2, 500);
|
||||
if (result < 0) {
|
||||
throw std::runtime_error(std::string("Unable to poll: ") + strerror(errno));
|
||||
}
|
||||
|
||||
if (pollfds[0].revents) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (pollfds[1].revents) {
|
||||
handleEvents();
|
||||
}
|
||||
}
|
||||
|
||||
close(mPipe[0]);
|
||||
close(mPipe[1]);
|
||||
close(mInotify);
|
||||
|
||||
mEndedSignal.notify();
|
||||
}
|
||||
|
||||
InotifyBackend::~InotifyBackend() {
|
||||
write(mPipe[1], "X", 1);
|
||||
mEndedSignal.wait();
|
||||
}
|
||||
|
||||
// This function is called by Backend::watch which takes a lock on mMutex
|
||||
void InotifyBackend::subscribe(WatcherRef watcher) {
|
||||
// Build a full directory tree recursively, and watch each directory.
|
||||
std::shared_ptr<DirTree> tree = getTree(watcher);
|
||||
|
||||
for (auto it = tree->entries.begin(); it != tree->entries.end(); it++) {
|
||||
if (it->second.isDir) {
|
||||
bool success = watchDir(watcher, it->second.path, tree);
|
||||
if (!success) {
|
||||
throw WatcherError(std::string("inotify_add_watch on '") + it->second.path + std::string("' failed: ") + strerror(errno), watcher);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool InotifyBackend::watchDir(WatcherRef watcher, std::string path, std::shared_ptr<DirTree> tree) {
|
||||
int wd = inotify_add_watch(mInotify, path.c_str(), INOTIFY_MASK);
|
||||
if (wd == -1) {
|
||||
return false;
|
||||
}
|
||||
|
||||
std::shared_ptr<InotifySubscription> sub = std::make_shared<InotifySubscription>();
|
||||
sub->tree = tree;
|
||||
sub->path = path;
|
||||
sub->watcher = watcher;
|
||||
mSubscriptions.emplace(wd, sub);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void InotifyBackend::handleEvents() {
|
||||
char buf[BUFFER_SIZE] __attribute__ ((aligned(__alignof__(struct inotify_event))));
|
||||
struct inotify_event *event;
|
||||
|
||||
// Track all of the watchers that are touched so we can notify them at the end of the events.
|
||||
std::unordered_set<WatcherRef> watchers;
|
||||
|
||||
while (true) {
|
||||
int n = read(mInotify, &buf, BUFFER_SIZE);
|
||||
if (n < 0) {
|
||||
if (errno == EAGAIN || errno == EWOULDBLOCK) {
|
||||
break;
|
||||
}
|
||||
|
||||
throw std::runtime_error(std::string("Error reading from inotify: ") + strerror(errno));
|
||||
}
|
||||
|
||||
if (n == 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
for (char *ptr = buf; ptr < buf + n; ptr += sizeof(*event) + event->len) {
|
||||
event = (struct inotify_event *)ptr;
|
||||
|
||||
if ((event->mask & IN_Q_OVERFLOW) == IN_Q_OVERFLOW) {
|
||||
// overflow
|
||||
continue;
|
||||
}
|
||||
|
||||
handleEvent(event, watchers);
|
||||
}
|
||||
}
|
||||
|
||||
for (auto it = watchers.begin(); it != watchers.end(); it++) {
|
||||
(*it)->notify();
|
||||
}
|
||||
}
|
||||
|
||||
void InotifyBackend::handleEvent(struct inotify_event *event, std::unordered_set<WatcherRef> &watchers) {
|
||||
std::unique_lock<std::mutex> lock(mMutex);
|
||||
|
||||
// Find the subscriptions for this watch descriptor
|
||||
auto range = mSubscriptions.equal_range(event->wd);
|
||||
std::unordered_set<std::shared_ptr<InotifySubscription>> set;
|
||||
for (auto it = range.first; it != range.second; it++) {
|
||||
set.insert(it->second);
|
||||
}
|
||||
|
||||
for (auto it = set.begin(); it != set.end(); it++) {
|
||||
if (handleSubscription(event, *it)) {
|
||||
watchers.insert((*it)->watcher);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool InotifyBackend::handleSubscription(struct inotify_event *event, std::shared_ptr<InotifySubscription> sub) {
|
||||
// Build the full path and check if it's in our ignore list.
|
||||
std::shared_ptr<Watcher> watcher = sub->watcher;
|
||||
std::string path = std::string(sub->path);
|
||||
bool isDir = event->mask & IN_ISDIR;
|
||||
|
||||
if (event->len > 0) {
|
||||
path += "/" + std::string(event->name);
|
||||
}
|
||||
|
||||
if (watcher->isIgnored(path)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// If this is a create, check if it's a directory and start watching if it is.
|
||||
// In any case, keep the directory tree up to date.
|
||||
if (event->mask & (IN_CREATE | IN_MOVED_TO)) {
|
||||
watcher->mEvents.create(path);
|
||||
|
||||
struct stat st;
|
||||
// Use lstat to avoid resolving symbolic links that we cannot watch anyway
|
||||
// https://github.com/parcel-bundler/watcher/issues/76
|
||||
lstat(path.c_str(), &st);
|
||||
DirEntry *entry = sub->tree->add(path, CONVERT_TIME(st.st_mtim), S_ISDIR(st.st_mode));
|
||||
|
||||
if (entry->isDir) {
|
||||
bool success = watchDir(watcher, path, sub->tree);
|
||||
if (!success) {
|
||||
sub->tree->remove(path);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} else if (event->mask & (IN_MODIFY | IN_ATTRIB)) {
|
||||
watcher->mEvents.update(path);
|
||||
|
||||
struct stat st;
|
||||
stat(path.c_str(), &st);
|
||||
sub->tree->update(path, CONVERT_TIME(st.st_mtim));
|
||||
} else if (event->mask & (IN_DELETE | IN_DELETE_SELF | IN_MOVED_FROM | IN_MOVE_SELF)) {
|
||||
bool isSelfEvent = (event->mask & (IN_DELETE_SELF | IN_MOVE_SELF));
|
||||
// Ignore delete/move self events unless this is the recursive watch root
|
||||
if (isSelfEvent && path != watcher->mDir) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// If the entry being deleted/moved is a directory, remove it from the list of subscriptions
|
||||
// XXX: self events don't have the IN_ISDIR mask
|
||||
if (isSelfEvent || isDir) {
|
||||
for (auto it = mSubscriptions.begin(); it != mSubscriptions.end();) {
|
||||
if (it->second->path == path) {
|
||||
it = mSubscriptions.erase(it);
|
||||
} else {
|
||||
++it;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
watcher->mEvents.remove(path);
|
||||
sub->tree->remove(path);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// This function is called by Backend::unwatch which takes a lock on mMutex
|
||||
void InotifyBackend::unsubscribe(WatcherRef watcher) {
|
||||
// Find any subscriptions pointing to this watcher, and remove them.
|
||||
for (auto it = mSubscriptions.begin(); it != mSubscriptions.end();) {
|
||||
if (it->second->watcher.get() == watcher.get()) {
|
||||
if (mSubscriptions.count(it->first) == 1) {
|
||||
int err = inotify_rm_watch(mInotify, it->first);
|
||||
if (err == -1) {
|
||||
throw WatcherError(std::string("Unable to remove watcher: ") + strerror(errno), watcher);
|
||||
}
|
||||
}
|
||||
|
||||
it = mSubscriptions.erase(it);
|
||||
} else {
|
||||
it++;
|
||||
}
|
||||
}
|
||||
}
|
||||
34 lars/node_modules/@parcel/watcher/src/linux/InotifyBackend.hh generated vendored Normal file
@@ -0,0 +1,34 @@
#ifndef INOTIFY_H
#define INOTIFY_H

#include <unordered_map>
#include <sys/inotify.h>
#include "../shared/BruteForceBackend.hh"
#include "../DirTree.hh"
#include "../Signal.hh"

struct InotifySubscription {
  std::shared_ptr<DirTree> tree;
  std::string path;
  WatcherRef watcher;
};

class InotifyBackend : public BruteForceBackend {
public:
  void start() override;
  ~InotifyBackend();
  void subscribe(WatcherRef watcher) override;
  void unsubscribe(WatcherRef watcher) override;
private:
  int mPipe[2];
  int mInotify;
  std::unordered_multimap<int, std::shared_ptr<InotifySubscription>> mSubscriptions;
  Signal mEndedSignal;

  bool watchDir(WatcherRef watcher, std::string path, std::shared_ptr<DirTree> tree);
  void handleEvents();
  void handleEvent(struct inotify_event *event, std::unordered_set<WatcherRef> &watchers);
  bool handleSubscription(struct inotify_event *event, std::shared_ptr<InotifySubscription> sub);
};

#endif
338 lars/node_modules/@parcel/watcher/src/macos/FSEventsBackend.cc generated vendored Normal file
@@ -0,0 +1,338 @@
|
||||
#include <CoreServices/CoreServices.h>
|
||||
#include <sys/stat.h>
|
||||
#include <string>
|
||||
#include <fstream>
|
||||
#include <unordered_set>
|
||||
#include "../Event.hh"
|
||||
#include "../Backend.hh"
|
||||
#include "./FSEventsBackend.hh"
|
||||
#include "../Watcher.hh"
|
||||
|
||||
#define CONVERT_TIME(ts) ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec)
|
||||
#define IGNORED_FLAGS (kFSEventStreamEventFlagItemIsHardlink | kFSEventStreamEventFlagItemIsLastHardlink | kFSEventStreamEventFlagItemIsSymlink | kFSEventStreamEventFlagItemIsDir | kFSEventStreamEventFlagItemIsFile)
|
||||
|
||||
void stopStream(FSEventStreamRef stream, CFRunLoopRef runLoop) {
|
||||
FSEventStreamStop(stream);
|
||||
FSEventStreamUnscheduleFromRunLoop(stream, runLoop, kCFRunLoopDefaultMode);
|
||||
FSEventStreamInvalidate(stream);
|
||||
FSEventStreamRelease(stream);
|
||||
}
|
||||
|
||||
// macOS has a case insensitive file system by default. In order to detect
|
||||
// file renames that only affect case, we need to get the canonical path
|
||||
// and compare it with the input path to determine if a file was created or deleted.
|
||||
bool pathExists(char *path) {
|
||||
int fd = open(path, O_RDONLY | O_SYMLINK);
|
||||
if (fd == -1) {
|
||||
return false;
|
||||
}
|
||||
|
||||
char buf[PATH_MAX];
|
||||
if (fcntl(fd, F_GETPATH, buf) == -1) {
|
||||
close(fd);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool res = strncmp(path, buf, PATH_MAX) == 0;
|
||||
close(fd);
|
||||
return res;
|
||||
}
|
||||
|
||||
class State: public WatcherState {
|
||||
public:
|
||||
FSEventStreamRef stream;
|
||||
std::shared_ptr<DirTree> tree;
|
||||
uint64_t since;
|
||||
};
|
||||
|
||||
void FSEventsCallback(
|
||||
ConstFSEventStreamRef streamRef,
|
||||
void *clientCallBackInfo,
|
||||
size_t numEvents,
|
||||
void *eventPaths,
|
||||
const FSEventStreamEventFlags eventFlags[],
|
||||
const FSEventStreamEventId eventIds[]
|
||||
) {
|
||||
char **paths = (char **)eventPaths;
|
||||
std::shared_ptr<Watcher>& watcher = *static_cast<std::shared_ptr<Watcher> *>(clientCallBackInfo);
|
||||
|
||||
EventList& list = watcher->mEvents;
|
||||
if (watcher->state == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
auto stateGuard = watcher->state;
|
||||
auto* state = static_cast<State*>(stateGuard.get());
|
||||
uint64_t since = state->since;
|
||||
bool deletedRoot = false;
|
||||
|
||||
for (size_t i = 0; i < numEvents; ++i) {
|
||||
bool isCreated = (eventFlags[i] & kFSEventStreamEventFlagItemCreated) == kFSEventStreamEventFlagItemCreated;
|
||||
bool isRemoved = (eventFlags[i] & kFSEventStreamEventFlagItemRemoved) == kFSEventStreamEventFlagItemRemoved;
|
||||
bool isModified = (eventFlags[i] & kFSEventStreamEventFlagItemModified) == kFSEventStreamEventFlagItemModified ||
|
||||
(eventFlags[i] & kFSEventStreamEventFlagItemInodeMetaMod) == kFSEventStreamEventFlagItemInodeMetaMod ||
|
||||
(eventFlags[i] & kFSEventStreamEventFlagItemFinderInfoMod) == kFSEventStreamEventFlagItemFinderInfoMod ||
|
||||
(eventFlags[i] & kFSEventStreamEventFlagItemChangeOwner) == kFSEventStreamEventFlagItemChangeOwner ||
|
||||
(eventFlags[i] & kFSEventStreamEventFlagItemXattrMod) == kFSEventStreamEventFlagItemXattrMod;
|
||||
bool isRenamed = (eventFlags[i] & kFSEventStreamEventFlagItemRenamed) == kFSEventStreamEventFlagItemRenamed;
|
||||
bool isDone = (eventFlags[i] & kFSEventStreamEventFlagHistoryDone) == kFSEventStreamEventFlagHistoryDone;
|
||||
bool isDir = (eventFlags[i] & kFSEventStreamEventFlagItemIsDir) == kFSEventStreamEventFlagItemIsDir;
|
||||
|
||||
|
||||
if (eventFlags[i] & kFSEventStreamEventFlagMustScanSubDirs) {
|
||||
if (eventFlags[i] & kFSEventStreamEventFlagUserDropped) {
|
||||
list.error("Events were dropped by the FSEvents client. File system must be re-scanned.");
|
||||
} else if (eventFlags[i] & kFSEventStreamEventFlagKernelDropped) {
|
||||
list.error("Events were dropped by the kernel. File system must be re-scanned.");
|
||||
} else {
|
||||
list.error("Too many events. File system must be re-scanned.");
|
||||
}
|
||||
}
|
||||
|
||||
if (isDone) {
|
||||
watcher->notify();
|
||||
break;
|
||||
}
|
||||
|
||||
auto ignoredFlags = IGNORED_FLAGS;
|
||||
if (__builtin_available(macOS 10.13, *)) {
|
||||
ignoredFlags |= kFSEventStreamEventFlagItemCloned;
|
||||
}
|
||||
|
||||
// If we don't care about any of the flags that are set, ignore this event.
|
||||
if ((eventFlags[i] & ~ignoredFlags) == 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// FSEvents exclusion paths only apply to files, not directories.
|
||||
if (watcher->isIgnored(paths[i])) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Handle unambiguous events first
|
||||
if (isCreated && !(isRemoved || isModified || isRenamed)) {
|
||||
state->tree->add(paths[i], 0, isDir);
|
||||
list.create(paths[i]);
|
||||
} else if (isRemoved && !(isCreated || isModified || isRenamed)) {
|
||||
state->tree->remove(paths[i]);
|
||||
list.remove(paths[i]);
|
||||
if (paths[i] == watcher->mDir) {
|
||||
deletedRoot = true;
|
||||
}
|
||||
} else if (isModified && !(isCreated || isRemoved || isRenamed)) {
|
||||
struct stat file;
|
||||
if (stat(paths[i], &file)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Ignore if mtime is the same as the last event.
|
||||
// This prevents duplicate events from being emitted.
|
||||
// If tv_nsec is zero, the file system probably only has second-level
|
||||
// granularity so allow the event through in that case.
|
||||
uint64_t mtime = CONVERT_TIME(file.st_mtimespec);
|
||||
DirEntry *entry = state->tree->find(paths[i]);
|
||||
if (entry && mtime == entry->mtime && file.st_mtimespec.tv_nsec != 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (entry) {
|
||||
// Update mtime.
|
||||
entry->mtime = mtime;
|
||||
} else {
|
||||
// Add to tree if this path has not been discovered yet.
|
||||
state->tree->add(paths[i], mtime, S_ISDIR(file.st_mode));
|
||||
}
|
||||
|
||||
list.update(paths[i]);
|
||||
} else {
|
||||
// If multiple flags were set, then we need to call `stat` to determine if the file really exists.
|
||||
// This helps disambiguate creates, updates, and deletes.
|
||||
struct stat file;
|
||||
if (stat(paths[i], &file) || !pathExists(paths[i])) {
|
||||
// File does not exist, so we have to assume it was removed. This is not exact since the
|
||||
// flags set by fsevents get coalesced together (e.g. created & deleted), so there is no way to
|
||||
// know whether the create and delete both happened since our snapshot (in which case
|
||||
// we'd rather ignore this event completely). This will result in some extra delete events
|
||||
// being emitted for files we don't know about, but that is the best we can do.
|
||||
state->tree->remove(paths[i]);
|
||||
list.remove(paths[i]);
|
||||
if (paths[i] == watcher->mDir) {
|
||||
deletedRoot = true;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// If the file was modified, and existed before, then this is an update, otherwise a create.
|
||||
uint64_t ctime = CONVERT_TIME(file.st_birthtimespec);
|
||||
uint64_t mtime = CONVERT_TIME(file.st_mtimespec);
|
||||
DirEntry *entry = !since ? state->tree->find(paths[i]) : NULL;
|
||||
if (entry && entry->mtime == mtime && file.st_mtimespec.tv_nsec != 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Some mounted file systems report a creation time of 0/unix epoch which we special case.
|
||||
if (isModified && (entry || (ctime <= since && ctime != 0))) {
|
||||
state->tree->update(paths[i], mtime);
|
||||
list.update(paths[i]);
|
||||
} else {
|
||||
state->tree->add(paths[i], mtime, S_ISDIR(file.st_mode));
|
||||
list.create(paths[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!since) {
|
||||
watcher->notify();
|
||||
}
|
||||
|
||||
// Stop watching if the root directory was deleted.
|
||||
if (deletedRoot) {
|
||||
stopStream((FSEventStreamRef)streamRef, CFRunLoopGetCurrent());
|
||||
watcher->state = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void checkWatcher(WatcherRef watcher) {
|
||||
struct stat file;
|
||||
if (stat(watcher->mDir.c_str(), &file)) {
|
||||
throw WatcherError(strerror(errno), watcher);
|
||||
}
|
||||
|
||||
if (!S_ISDIR(file.st_mode)) {
|
||||
throw WatcherError(strerror(ENOTDIR), watcher);
|
||||
}
|
||||
}
|
||||
|
||||
void FSEventsBackend::startStream(WatcherRef watcher, FSEventStreamEventId id) {
|
||||
checkWatcher(watcher);
|
||||
|
||||
CFAbsoluteTime latency = 0.001;
|
||||
CFStringRef fileWatchPath = CFStringCreateWithCString(
|
||||
NULL,
|
||||
watcher->mDir.c_str(),
|
||||
kCFStringEncodingUTF8
|
||||
);
|
||||
|
||||
CFArrayRef pathsToWatch = CFArrayCreate(
|
||||
NULL,
|
||||
(const void **)&fileWatchPath,
|
||||
1,
|
||||
NULL
|
||||
);
|
||||
|
||||
// Make a watcher reference we can pass into the callback. This ensures bumped ref-count.
|
||||
std::shared_ptr<Watcher>* callbackWatcher = new std::shared_ptr<Watcher> (watcher);
|
||||
FSEventStreamContext callbackInfo {0, static_cast<void*> (callbackWatcher), nullptr, nullptr, nullptr};
|
||||
FSEventStreamRef stream = FSEventStreamCreate(
|
||||
NULL,
|
||||
&FSEventsCallback,
|
||||
&callbackInfo,
|
||||
pathsToWatch,
|
||||
id,
|
||||
latency,
|
||||
kFSEventStreamCreateFlagFileEvents
|
||||
);
|
||||
|
||||
CFMutableArrayRef exclusions = CFArrayCreateMutable(NULL, watcher->mIgnorePaths.size(), NULL);
|
||||
for (auto it = watcher->mIgnorePaths.begin(); it != watcher->mIgnorePaths.end(); it++) {
|
||||
CFStringRef path = CFStringCreateWithCString(
|
||||
NULL,
|
||||
it->c_str(),
|
||||
kCFStringEncodingUTF8
|
||||
);
|
||||
|
||||
CFArrayAppendValue(exclusions, (const void *)path);
|
||||
}
|
||||
|
||||
FSEventStreamSetExclusionPaths(stream, exclusions);
|
||||
|
||||
FSEventStreamScheduleWithRunLoop(stream, mRunLoop, kCFRunLoopDefaultMode);
|
||||
bool started = FSEventStreamStart(stream);
|
||||
|
||||
CFRelease(pathsToWatch);
|
||||
CFRelease(fileWatchPath);
|
||||
|
||||
if (!started) {
|
||||
FSEventStreamRelease(stream);
|
||||
throw WatcherError("Error starting FSEvents stream", watcher);
|
||||
}
|
||||
|
||||
auto stateGuard = watcher->state;
|
||||
State* s = static_cast<State*>(stateGuard.get());
|
||||
s->tree = std::make_shared<DirTree>(watcher->mDir);
|
||||
s->stream = stream;
|
||||
}
|
||||
|
||||
void FSEventsBackend::start() {
|
||||
mRunLoop = CFRunLoopGetCurrent();
|
||||
CFRetain(mRunLoop);
|
||||
|
||||
// Unlock once run loop has started.
|
||||
CFRunLoopPerformBlock(mRunLoop, kCFRunLoopDefaultMode, ^ {
|
||||
notifyStarted();
|
||||
});
|
||||
|
||||
CFRunLoopWakeUp(mRunLoop);
|
||||
CFRunLoopRun();
|
||||
}
|
||||
|
||||
FSEventsBackend::~FSEventsBackend() {
|
||||
std::unique_lock<std::mutex> lock(mMutex);
|
||||
CFRunLoopStop(mRunLoop);
|
||||
CFRelease(mRunLoop);
|
||||
}
|
||||
|
||||
void FSEventsBackend::writeSnapshot(WatcherRef watcher, std::string *snapshotPath) {
|
||||
std::unique_lock<std::mutex> lock(mMutex);
|
||||
checkWatcher(watcher);
|
||||
|
||||
FSEventStreamEventId id = FSEventsGetCurrentEventId();
|
||||
std::ofstream ofs(*snapshotPath);
|
||||
ofs << id;
|
||||
ofs << "\n";
|
||||
|
||||
struct timespec now;
|
||||
clock_gettime(CLOCK_REALTIME, &now);
|
||||
ofs << CONVERT_TIME(now);
|
||||
}
|
||||
|
||||
void FSEventsBackend::getEventsSince(WatcherRef watcher, std::string *snapshotPath) {
|
||||
std::unique_lock<std::mutex> lock(mMutex);
|
||||
std::ifstream ifs(*snapshotPath);
|
||||
if (ifs.fail()) {
|
||||
return;
|
||||
}
|
||||
|
||||
FSEventStreamEventId id;
|
||||
uint64_t since;
|
||||
ifs >> id;
|
||||
ifs >> since;
|
||||
|
||||
auto s = std::make_shared<State>();
|
||||
s->since = since;
|
||||
watcher->state = s;
|
||||
|
||||
startStream(watcher, id);
|
||||
watcher->wait();
|
||||
stopStream(s->stream, mRunLoop);
|
||||
|
||||
watcher->state = nullptr;
|
||||
}
|
||||
|
||||
// This function is called by Backend::watch which takes a lock on mMutex
|
||||
void FSEventsBackend::subscribe(WatcherRef watcher) {
|
||||
auto s = std::make_shared<State>();
|
||||
s->since = 0;
|
||||
watcher->state = s;
|
||||
startStream(watcher, kFSEventStreamEventIdSinceNow);
|
||||
}
|
||||
|
||||
// This function is called by Backend::unwatch which takes a lock on mMutex
|
||||
void FSEventsBackend::unsubscribe(WatcherRef watcher) {
|
||||
auto stateGuard = watcher->state;
|
||||
State* s = static_cast<State*>(stateGuard.get());
|
||||
if (s != nullptr) {
|
||||
stopStream(s->stream, mRunLoop);
|
||||
watcher->state = nullptr;
|
||||
}
|
||||
}
|
||||
20 lars/node_modules/@parcel/watcher/src/macos/FSEventsBackend.hh generated vendored Normal file
@@ -0,0 +1,20 @@
#ifndef FS_EVENTS_H
#define FS_EVENTS_H

#include <CoreServices/CoreServices.h>
#include "../Backend.hh"

class FSEventsBackend : public Backend {
public:
  void start() override;
  ~FSEventsBackend();
  void writeSnapshot(WatcherRef watcher, std::string *snapshotPath) override;
  void getEventsSince(WatcherRef watcher, std::string *snapshotPath) override;
  void subscribe(WatcherRef watcher) override;
  void unsubscribe(WatcherRef watcher) override;
private:
  void startStream(WatcherRef watcher, FSEventStreamEventId id);
  CFRunLoopRef mRunLoop;
};

#endif
41 lars/node_modules/@parcel/watcher/src/shared/BruteForceBackend.cc generated vendored Normal file
@@ -0,0 +1,41 @@
#include <string>
#include "../DirTree.hh"
#include "../Event.hh"
#include "./BruteForceBackend.hh"

std::shared_ptr<DirTree> BruteForceBackend::getTree(WatcherRef watcher, bool shouldRead) {
  auto tree = DirTree::getCached(watcher->mDir);

  // If the tree is not complete, read it if needed.
  if (!tree->isComplete && shouldRead) {
    readTree(watcher, tree);
    tree->isComplete = true;
  }

  return tree;
}

void BruteForceBackend::writeSnapshot(WatcherRef watcher, std::string *snapshotPath) {
  std::unique_lock<std::mutex> lock(mMutex);
  auto tree = getTree(watcher);
  FILE *f = fopen(snapshotPath->c_str(), "w");
  if (!f) {
    throw std::runtime_error(std::string("Unable to open snapshot file: ") + strerror(errno));
  }

  tree->write(f);
  fclose(f);
}

void BruteForceBackend::getEventsSince(WatcherRef watcher, std::string *snapshotPath) {
  std::unique_lock<std::mutex> lock(mMutex);
  FILE *f = fopen(snapshotPath->c_str(), "r");
  if (!f) {
    throw std::runtime_error(std::string("Unable to open snapshot file: ") + strerror(errno));
  }

  DirTree snapshot{watcher->mDir, f};
  auto now = getTree(watcher);
  now->getChanges(&snapshot, watcher->mEvents);
  fclose(f);
}
25 lars/node_modules/@parcel/watcher/src/shared/BruteForceBackend.hh generated vendored Normal file
@@ -0,0 +1,25 @@
#ifndef BRUTE_FORCE_H
#define BRUTE_FORCE_H

#include "../Backend.hh"
#include "../DirTree.hh"
#include "../Watcher.hh"

class BruteForceBackend : public Backend {
public:
  void writeSnapshot(WatcherRef watcher, std::string *snapshotPath) override;
  void getEventsSince(WatcherRef watcher, std::string *snapshotPath) override;
  void subscribe(WatcherRef watcher) override {
    throw "Brute force backend doesn't support subscriptions.";
  }

  void unsubscribe(WatcherRef watcher) override {
    throw "Brute force backend doesn't support subscriptions.";
  }

  std::shared_ptr<DirTree> getTree(WatcherRef watcher, bool shouldRead = true);
private:
  void readTree(WatcherRef watcher, std::shared_ptr<DirTree> tree);
};

#endif
50 lars/node_modules/@parcel/watcher/src/unix/fts.cc generated vendored Normal file
@@ -0,0 +1,50 @@
#include <string>

// weird error on linux
#ifdef __THROW
#undef __THROW
#endif
#define __THROW

#include <fts.h>
#include <sys/stat.h>
#include "../DirTree.hh"
#include "../shared/BruteForceBackend.hh"

#define CONVERT_TIME(ts) ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec)
#if __APPLE__
#define st_mtim st_mtimespec
#endif

void BruteForceBackend::readTree(WatcherRef watcher, std::shared_ptr<DirTree> tree) {
  char *paths[2] {(char *)watcher->mDir.c_str(), NULL};
  FTS *fts = fts_open(paths, FTS_NOCHDIR | FTS_PHYSICAL, NULL);
  if (!fts) {
    throw WatcherError(strerror(errno), watcher);
  }

  FTSENT *node;
  bool isRoot = true;

  while ((node = fts_read(fts)) != NULL) {
    if (node->fts_errno) {
      fts_close(fts);
      throw WatcherError(strerror(node->fts_errno), watcher);
    }

    if (isRoot && !(node->fts_info & FTS_D)) {
      fts_close(fts);
      throw WatcherError(strerror(ENOTDIR), watcher);
    }

    if (watcher->isIgnored(std::string(node->fts_path))) {
      fts_set(fts, node, FTS_SKIP);
      continue;
    }

    tree->add(node->fts_path, CONVERT_TIME(node->fts_statp->st_mtim), (node->fts_info & FTS_D) == FTS_D);
    isRoot = false;
  }

  fts_close(fts);
}
77 lars/node_modules/@parcel/watcher/src/unix/legacy.cc generated vendored Normal file
@@ -0,0 +1,77 @@
|
||||
#include <string>
|
||||
|
||||
// weird error on linux
|
||||
#ifdef __THROW
|
||||
#undef __THROW
|
||||
#endif
|
||||
#define __THROW
|
||||
|
||||
#ifdef _LIBC
|
||||
# include <include/sys/stat.h>
|
||||
#else
|
||||
# include <sys/stat.h>
|
||||
#endif
|
||||
#include <dirent.h>
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
|
||||
#include "../DirTree.hh"
|
||||
#include "../shared/BruteForceBackend.hh"
|
||||
|
||||
#define CONVERT_TIME(ts) ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec)
|
||||
#if __APPLE__
|
||||
#define st_mtim st_mtimespec
|
||||
#endif
|
||||
#define ISDOT(a) (a[0] == '.' && (!a[1] || (a[1] == '.' && !a[2])))
|
||||
|
||||
void iterateDir(WatcherRef watcher, const std::shared_ptr <DirTree> tree, const char *relative, int parent_fd, const std::string &dirname) {
|
||||
int open_flags = (O_RDONLY | O_CLOEXEC | O_DIRECTORY | O_NOCTTY | O_NONBLOCK | O_NOFOLLOW);
|
||||
int new_fd = openat(parent_fd, relative, open_flags);
|
||||
if (new_fd == -1) {
|
||||
if (errno == EACCES) {
|
||||
return; // ignore insufficient permissions
|
||||
}
|
||||
|
||||
throw WatcherError(strerror(errno), watcher);
|
||||
}
|
||||
|
||||
struct stat rootAttributes;
|
||||
fstatat(new_fd, ".", &rootAttributes, AT_SYMLINK_NOFOLLOW);
|
||||
tree->add(dirname, CONVERT_TIME(rootAttributes.st_mtim), true);
|
||||
|
||||
if (DIR *dir = fdopendir(new_fd)) {
|
||||
while (struct dirent *ent = (errno = 0, readdir(dir))) {
|
||||
if (ISDOT(ent->d_name)) continue;
|
||||
|
||||
std::string fullPath = dirname + "/" + ent->d_name;
|
||||
|
||||
if (!watcher->isIgnored(fullPath)) {
|
||||
struct stat attrib;
|
||||
fstatat(new_fd, ent->d_name, &attrib, AT_SYMLINK_NOFOLLOW);
|
||||
bool isDir = ent->d_type == DT_DIR;
|
||||
|
||||
if (isDir) {
|
||||
iterateDir(watcher, tree, ent->d_name, new_fd, fullPath);
|
||||
} else {
|
||||
tree->add(fullPath, CONVERT_TIME(attrib.st_mtim), isDir);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
closedir(dir);
|
||||
} else {
|
||||
close(new_fd);
|
||||
}
|
||||
|
||||
if (errno) {
|
||||
throw WatcherError(strerror(errno), watcher);
|
||||
}
|
||||
}
|
||||
|
||||
void BruteForceBackend::readTree(WatcherRef watcher, std::shared_ptr <DirTree> tree) {
|
||||
int fd = open(watcher->mDir.c_str(), O_RDONLY);
|
||||
if (fd) {
|
||||
iterateDir(watcher, tree, ".", fd, watcher->mDir);
|
||||
close(fd);
|
||||
}
|
||||
}
|
||||
132 lars/node_modules/@parcel/watcher/src/wasm/WasmBackend.cc generated vendored Normal file
@@ -0,0 +1,132 @@
|
||||
#include <sys/stat.h>
|
||||
#include "WasmBackend.hh"
|
||||
|
||||
#define CONVERT_TIME(ts) ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec)
|
||||
|
||||
void WasmBackend::start() {
|
||||
notifyStarted();
|
||||
}
|
||||
|
||||
void WasmBackend::subscribe(WatcherRef watcher) {
|
||||
// Build a full directory tree recursively, and watch each directory.
|
||||
std::shared_ptr<DirTree> tree = getTree(watcher);
|
||||
|
||||
for (auto it = tree->entries.begin(); it != tree->entries.end(); it++) {
|
||||
if (it->second.isDir) {
|
||||
watchDir(watcher, it->second.path, tree);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void WasmBackend::watchDir(WatcherRef watcher, std::string path, std::shared_ptr<DirTree> tree) {
|
||||
int wd = wasm_backend_add_watch(path.c_str(), (void *)this);
|
||||
std::shared_ptr<WasmSubscription> sub = std::make_shared<WasmSubscription>();
|
||||
sub->tree = tree;
|
||||
sub->path = path;
|
||||
sub->watcher = watcher;
|
||||
mSubscriptions.emplace(wd, sub);
|
||||
}
|
||||
|
||||
extern "C" void wasm_backend_event_handler(void *backend, int wd, int type, char *filename) {
|
||||
WasmBackend *b = (WasmBackend *)(backend);
|
||||
b->handleEvent(wd, type, filename);
|
||||
}
|
||||
|
||||
void WasmBackend::handleEvent(int wd, int type, char *filename) {
|
||||
// Find the subscriptions for this watch descriptor
|
||||
auto range = mSubscriptions.equal_range(wd);
|
||||
std::unordered_set<std::shared_ptr<WasmSubscription>> set;
|
||||
for (auto it = range.first; it != range.second; it++) {
|
||||
set.insert(it->second);
|
||||
}
|
||||
|
||||
for (auto it = set.begin(); it != set.end(); it++) {
|
||||
if (handleSubscription(type, filename, *it)) {
|
||||
(*it)->watcher->notify();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool WasmBackend::handleSubscription(int type, char *filename, std::shared_ptr<WasmSubscription> sub) {
|
||||
// Build the full path and check if it's in our ignore list.
|
||||
WatcherRef watcher = sub->watcher;
|
||||
std::string path = std::string(sub->path);
|
||||
|
||||
if (filename[0] != '\0') {
|
||||
path += "/" + std::string(filename);
|
||||
}
|
||||
|
||||
if (watcher->isIgnored(path)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (type == 1) {
|
||||
struct stat st;
|
||||
stat(path.c_str(), &st);
|
||||
sub->tree->update(path, CONVERT_TIME(st.st_mtim));
|
||||
watcher->mEvents.update(path);
|
||||
} else if (type == 2) {
|
||||
// Determine if this is a create or delete depending on if the file exists or not.
|
||||
struct stat st;
|
||||
if (lstat(path.c_str(), &st)) {
|
||||
// If the entry being deleted/moved is a directory, remove it from the list of subscriptions
|
||||
DirEntry *entry = sub->tree->find(path);
|
||||
if (!entry) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (entry->isDir) {
|
||||
std::string pathStart = path + DIR_SEP;
|
||||
for (auto it = mSubscriptions.begin(); it != mSubscriptions.end();) {
|
||||
if (it->second->path == path || it->second->path.rfind(pathStart, 0) == 0) {
|
||||
wasm_backend_remove_watch(it->first);
|
||||
it = mSubscriptions.erase(it);
|
||||
} else {
|
||||
++it;
|
||||
}
|
||||
}
|
||||
|
||||
// Remove all sub-entries
|
||||
for (auto it = sub->tree->entries.begin(); it != sub->tree->entries.end();) {
|
||||
if (it->first.rfind(pathStart, 0) == 0) {
|
||||
watcher->mEvents.remove(it->first);
|
||||
it = sub->tree->entries.erase(it);
|
||||
} else {
|
||||
it++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
watcher->mEvents.remove(path);
|
||||
sub->tree->remove(path);
|
||||
} else if (sub->tree->find(path)) {
|
||||
sub->tree->update(path, CONVERT_TIME(st.st_mtim));
|
||||
watcher->mEvents.update(path);
|
||||
} else {
|
||||
watcher->mEvents.create(path);
|
||||
|
||||
// If this is a create, check if it's a directory and start watching if it is.
|
||||
DirEntry *entry = sub->tree->add(path, CONVERT_TIME(st.st_mtim), S_ISDIR(st.st_mode));
|
||||
if (entry->isDir) {
|
||||
watchDir(watcher, path, sub->tree);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void WasmBackend::unsubscribe(WatcherRef watcher) {
|
||||
// Find any subscriptions pointing to this watcher, and remove them.
|
||||
for (auto it = mSubscriptions.begin(); it != mSubscriptions.end();) {
|
||||
if (it->second->watcher.get() == watcher.get()) {
|
||||
if (mSubscriptions.count(it->first) == 1) {
|
||||
wasm_backend_remove_watch(it->first);
|
||||
}
|
||||
|
||||
it = mSubscriptions.erase(it);
|
||||
} else {
|
||||
it++;
|
||||
}
|
||||
}
|
||||
}
|
||||
34 lars/node_modules/@parcel/watcher/src/wasm/WasmBackend.hh generated vendored Normal file
@@ -0,0 +1,34 @@
#ifndef WASM_H
#define WASM_H

#include <unordered_map>
#include "../shared/BruteForceBackend.hh"
#include "../DirTree.hh"

extern "C" {
  int wasm_backend_add_watch(const char *filename, void *backend);
  void wasm_backend_remove_watch(int wd);
  void wasm_backend_event_handler(void *backend, int wd, int type, char *filename);
};

struct WasmSubscription {
  std::shared_ptr<DirTree> tree;
  std::string path;
  WatcherRef watcher;
};

class WasmBackend : public BruteForceBackend {
public:
  void start() override;
  void subscribe(WatcherRef watcher) override;
  void unsubscribe(WatcherRef watcher) override;
  void handleEvent(int wd, int type, char *filename);
private:
  int mWasm;
  std::unordered_multimap<int, std::shared_ptr<WasmSubscription>> mSubscriptions;

  void watchDir(WatcherRef watcher, std::string path, std::shared_ptr<DirTree> tree);
  bool handleSubscription(int type, char *filename, std::shared_ptr<WasmSubscription> sub);
};

#endif
74 lars/node_modules/@parcel/watcher/src/wasm/include.h generated vendored Normal file
@@ -0,0 +1,74 @@
|
||||
/*
|
||||
Copyright Node.js contributors. All rights reserved.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to
|
||||
deal in the Software without restriction, including without limitation the
|
||||
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
sell copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
// Node does not include the headers for these functions when compiling for WASM, so add them here.
|
||||
#ifdef __wasm32__
|
||||
extern "C" {
|
||||
NAPI_EXTERN napi_status NAPI_CDECL
|
||||
napi_create_threadsafe_function(napi_env env,
|
||||
napi_value func,
|
||||
napi_value async_resource,
|
||||
napi_value async_resource_name,
|
||||
size_t max_queue_size,
|
||||
size_t initial_thread_count,
|
||||
void* thread_finalize_data,
|
||||
napi_finalize thread_finalize_cb,
|
||||
void* context,
|
||||
napi_threadsafe_function_call_js call_js_cb,
|
||||
napi_threadsafe_function* result);
|
||||
|
||||
NAPI_EXTERN napi_status NAPI_CDECL napi_get_threadsafe_function_context(
|
||||
napi_threadsafe_function func, void** result);
|
||||
|
||||
NAPI_EXTERN napi_status NAPI_CDECL
|
||||
napi_call_threadsafe_function(napi_threadsafe_function func,
|
||||
void* data,
|
||||
napi_threadsafe_function_call_mode is_blocking);
|
||||
|
||||
NAPI_EXTERN napi_status NAPI_CDECL
|
||||
napi_acquire_threadsafe_function(napi_threadsafe_function func);
|
||||
|
||||
NAPI_EXTERN napi_status NAPI_CDECL napi_release_threadsafe_function(
|
||||
napi_threadsafe_function func, napi_threadsafe_function_release_mode mode);
|
||||
|
||||
NAPI_EXTERN napi_status NAPI_CDECL
|
||||
napi_unref_threadsafe_function(napi_env env, napi_threadsafe_function func);
|
||||
|
||||
NAPI_EXTERN napi_status NAPI_CDECL
|
||||
napi_ref_threadsafe_function(napi_env env, napi_threadsafe_function func);
|
||||
|
||||
NAPI_EXTERN napi_status NAPI_CDECL
|
||||
napi_create_async_work(napi_env env,
|
||||
napi_value async_resource,
|
||||
napi_value async_resource_name,
|
||||
napi_async_execute_callback execute,
|
||||
napi_async_complete_callback complete,
|
||||
void* data,
|
||||
napi_async_work* result);
|
||||
NAPI_EXTERN napi_status NAPI_CDECL napi_delete_async_work(napi_env env,
|
||||
napi_async_work work);
|
||||
NAPI_EXTERN napi_status NAPI_CDECL napi_queue_async_work(napi_env env,
|
||||
napi_async_work work);
|
||||
NAPI_EXTERN napi_status NAPI_CDECL napi_cancel_async_work(napi_env env,
|
||||
napi_async_work work);
|
||||
}
|
||||
#endif
|
||||
302 lars/node_modules/@parcel/watcher/src/watchman/BSER.cc generated vendored Normal file
@@ -0,0 +1,302 @@
|
||||
#include <stdint.h>
|
||||
#include "./BSER.hh"
|
||||
|
||||
BSERType decodeType(std::istream &iss) {
|
||||
int8_t type;
|
||||
iss.read(reinterpret_cast<char*>(&type), sizeof(type));
|
||||
return (BSERType) type;
|
||||
}
|
||||
|
||||
void expectType(std::istream &iss, BSERType expected) {
|
||||
BSERType got = decodeType(iss);
|
||||
if (got != expected) {
|
||||
throw std::runtime_error("Unexpected BSER type");
|
||||
}
|
||||
}
|
||||
|
||||
void encodeType(std::ostream &oss, BSERType type) {
|
||||
int8_t t = (int8_t)type;
|
||||
oss.write(reinterpret_cast<char*>(&t), sizeof(t));
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
class Value : public BSERValue {
|
||||
public:
|
||||
T value;
|
||||
Value(T val) {
|
||||
value = val;
|
||||
}
|
||||
|
||||
Value() {}
|
||||
};
|
||||
|
||||
class BSERInteger : public Value<int64_t> {
|
||||
public:
|
||||
BSERInteger(int64_t value) : Value(value) {}
|
||||
BSERInteger(std::istream &iss) {
|
||||
int8_t int8;
|
||||
int16_t int16;
|
||||
int32_t int32;
|
||||
int64_t int64;
|
||||
|
||||
BSERType type = decodeType(iss);
|
||||
|
||||
switch (type) {
|
||||
case BSER_INT8:
|
||||
iss.read(reinterpret_cast<char*>(&int8), sizeof(int8));
|
||||
value = int8;
|
||||
break;
|
||||
case BSER_INT16:
|
||||
iss.read(reinterpret_cast<char*>(&int16), sizeof(int16));
|
||||
value = int16;
|
||||
break;
|
||||
case BSER_INT32:
|
||||
iss.read(reinterpret_cast<char*>(&int32), sizeof(int32));
|
||||
value = int32;
|
||||
break;
|
||||
case BSER_INT64:
|
||||
iss.read(reinterpret_cast<char*>(&int64), sizeof(int64));
|
||||
value = int64;
|
||||
break;
|
||||
default:
|
||||
throw std::runtime_error("Invalid BSER int type");
|
||||
}
|
||||
}
|
||||
|
||||
int64_t intValue() override {
|
||||
return value;
|
||||
}
|
||||
|
||||
void encode(std::ostream &oss) override {
|
||||
if (value <= INT8_MAX) {
|
||||
encodeType(oss, BSER_INT8);
|
||||
int8_t v = (int8_t)value;
|
||||
oss.write(reinterpret_cast<char*>(&v), sizeof(v));
|
||||
} else if (value <= INT16_MAX) {
|
||||
encodeType(oss, BSER_INT16);
|
||||
int16_t v = (int16_t)value;
|
||||
oss.write(reinterpret_cast<char*>(&v), sizeof(v));
|
||||
} else if (value <= INT32_MAX) {
|
||||
encodeType(oss, BSER_INT32);
|
||||
int32_t v = (int32_t)value;
|
||||
oss.write(reinterpret_cast<char*>(&v), sizeof(v));
|
||||
} else {
|
||||
encodeType(oss, BSER_INT64);
|
||||
oss.write(reinterpret_cast<char*>(&value), sizeof(value));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
class BSERArray : public Value<BSER::Array> {
|
||||
public:
|
||||
BSERArray() : Value() {}
|
||||
BSERArray(BSER::Array value) : Value(value) {}
|
||||
BSERArray(std::istream &iss) {
|
||||
expectType(iss, BSER_ARRAY);
|
||||
int64_t len = BSERInteger(iss).intValue();
|
||||
for (int64_t i = 0; i < len; i++) {
|
||||
value.push_back(BSER(iss));
|
||||
}
|
||||
}
|
||||
|
||||
BSER::Array arrayValue() override {
|
||||
return value;
|
||||
}
|
||||
|
||||
void encode(std::ostream &oss) override {
|
||||
encodeType(oss, BSER_ARRAY);
|
||||
BSERInteger(value.size()).encode(oss);
|
||||
for (auto it = value.begin(); it != value.end(); it++) {
|
||||
it->encode(oss);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
class BSERString : public Value<std::string> {
|
||||
public:
|
||||
BSERString(std::string value) : Value(value) {}
|
||||
BSERString(std::istream &iss) {
|
||||
expectType(iss, BSER_STRING);
|
||||
int64_t len = BSERInteger(iss).intValue();
|
||||
value.resize(len);
|
||||
iss.read(&value[0], len);
|
||||
}
|
||||
|
||||
std::string stringValue() override {
|
||||
return value;
|
||||
}
|
||||
|
||||
void encode(std::ostream &oss) override {
|
||||
encodeType(oss, BSER_STRING);
|
||||
BSERInteger(value.size()).encode(oss);
|
||||
oss << value;
|
||||
}
|
||||
};
|
||||
|
||||
class BSERObject : public Value<BSER::Object> {
|
||||
public:
|
||||
BSERObject() : Value() {}
|
||||
BSERObject(BSER::Object value) : Value(value) {}
|
||||
BSERObject(std::istream &iss) {
|
||||
expectType(iss, BSER_OBJECT);
|
||||
int64_t len = BSERInteger(iss).intValue();
|
||||
for (int64_t i = 0; i < len; i++) {
|
||||
auto key = BSERString(iss).stringValue();
|
||||
auto val = BSER(iss);
|
||||
value.emplace(key, val);
|
||||
}
|
||||
}
|
||||
|
||||
BSER::Object objectValue() override {
|
||||
return value;
|
||||
}
|
||||
|
||||
void encode(std::ostream &oss) override {
|
||||
encodeType(oss, BSER_OBJECT);
|
||||
BSERInteger(value.size()).encode(oss);
|
||||
for (auto it = value.begin(); it != value.end(); it++) {
|
||||
BSERString(it->first).encode(oss);
|
||||
it->second.encode(oss);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
class BSERDouble : public Value<double> {
|
||||
public:
|
||||
BSERDouble(double value) : Value(value) {}
|
||||
BSERDouble(std::istream &iss) {
|
||||
expectType(iss, BSER_REAL);
|
||||
iss.read(reinterpret_cast<char*>(&value), sizeof(value));
|
||||
}
|
||||
|
||||
double doubleValue() override {
|
||||
return value;
|
||||
}
|
||||
|
||||
void encode(std::ostream &oss) override {
|
||||
encodeType(oss, BSER_REAL);
|
||||
oss.write(reinterpret_cast<char*>(&value), sizeof(value));
|
||||
}
|
||||
};
|
||||
|
||||
class BSERBoolean : public Value<bool> {
|
||||
public:
|
||||
BSERBoolean(bool value) : Value(value) {}
|
||||
bool boolValue() override { return value; }
|
||||
void encode(std::ostream &oss) override {
|
||||
int8_t t = value == true ? BSER_BOOL_TRUE : BSER_BOOL_FALSE;
|
||||
oss.write(reinterpret_cast<char*>(&t), sizeof(t));
|
||||
}
|
||||
};
|
||||
|
||||
class BSERNull : public Value<bool> {
|
||||
public:
|
||||
BSERNull() : Value(false) {}
|
||||
void encode(std::ostream &oss) override {
|
||||
encodeType(oss, BSER_NULL);
|
||||
}
|
||||
};
|
||||
|
||||
std::shared_ptr<BSERArray> decodeTemplate(std::istream &iss) {
|
||||
expectType(iss, BSER_TEMPLATE);
|
||||
auto keys = BSERArray(iss).arrayValue();
|
||||
auto len = BSERInteger(iss).intValue();
|
||||
std::shared_ptr<BSERArray> arr = std::make_shared<BSERArray>();
|
||||
for (int64_t i = 0; i < len; i++) {
|
||||
BSER::Object obj;
|
||||
for (auto it = keys.begin(); it != keys.end(); it++) {
|
||||
if (iss.peek() == 0x0c) {
|
||||
iss.ignore(1);
|
||||
continue;
|
||||
}
|
||||
|
||||
auto val = BSER(iss);
|
||||
obj.emplace(it->stringValue(), val);
|
||||
}
|
||||
arr->value.push_back(obj);
|
||||
}
|
||||
return arr;
|
||||
}
|
||||
|
||||
BSER::BSER(std::istream &iss) {
|
||||
BSERType type = decodeType(iss);
|
||||
iss.unget();
|
||||
|
||||
switch (type) {
|
||||
case BSER_ARRAY:
|
||||
m_ptr = std::make_shared<BSERArray>(iss);
|
||||
break;
|
||||
case BSER_OBJECT:
|
||||
m_ptr = std::make_shared<BSERObject>(iss);
|
||||
break;
|
||||
case BSER_STRING:
|
||||
m_ptr = std::make_shared<BSERString>(iss);
|
||||
break;
|
||||
case BSER_INT8:
|
||||
case BSER_INT16:
|
||||
case BSER_INT32:
|
||||
case BSER_INT64:
|
||||
m_ptr = std::make_shared<BSERInteger>(iss);
|
||||
break;
|
||||
case BSER_REAL:
|
||||
m_ptr = std::make_shared<BSERDouble>(iss);
|
||||
break;
|
||||
case BSER_BOOL_TRUE:
|
||||
iss.ignore(1);
|
||||
m_ptr = std::make_shared<BSERBoolean>(true);
|
||||
break;
|
||||
case BSER_BOOL_FALSE:
|
||||
iss.ignore(1);
|
||||
m_ptr = std::make_shared<BSERBoolean>(false);
|
||||
break;
|
||||
case BSER_NULL:
|
||||
iss.ignore(1);
|
||||
m_ptr = std::make_shared<BSERNull>();
|
||||
break;
|
||||
case BSER_TEMPLATE:
|
||||
m_ptr = decodeTemplate(iss);
|
||||
break;
|
||||
default:
|
||||
throw std::runtime_error("unknown BSER type");
|
||||
}
|
||||
}
|
||||
|
||||
BSER::BSER() : m_ptr(std::make_shared<BSERNull>()) {}
|
||||
BSER::BSER(BSER::Array value) : m_ptr(std::make_shared<BSERArray>(value)) {}
|
||||
BSER::BSER(BSER::Object value) : m_ptr(std::make_shared<BSERObject>(value)) {}
|
||||
BSER::BSER(const char *value) : m_ptr(std::make_shared<BSERString>(value)) {}
|
||||
BSER::BSER(std::string value) : m_ptr(std::make_shared<BSERString>(value)) {}
|
||||
BSER::BSER(int64_t value) : m_ptr(std::make_shared<BSERInteger>(value)) {}
|
||||
BSER::BSER(double value) : m_ptr(std::make_shared<BSERDouble>(value)) {}
|
||||
BSER::BSER(bool value) : m_ptr(std::make_shared<BSERBoolean>(value)) {}
|
||||
|
||||
BSER::Array BSER::arrayValue() { return m_ptr->arrayValue(); }
|
||||
BSER::Object BSER::objectValue() { return m_ptr->objectValue(); }
|
||||
std::string BSER::stringValue() { return m_ptr->stringValue(); }
|
||||
int64_t BSER::intValue() { return m_ptr->intValue(); }
|
||||
double BSER::doubleValue() { return m_ptr->doubleValue(); }
|
||||
bool BSER::boolValue() { return m_ptr->boolValue(); }
|
||||
void BSER::encode(std::ostream &oss) {
|
||||
m_ptr->encode(oss);
|
||||
}
|
||||
|
||||
int64_t BSER::decodeLength(std::istream &iss) {
|
||||
char pdu[2];
|
||||
if (!iss.read(pdu, 2) || pdu[0] != 0 || pdu[1] != 1) {
|
||||
throw std::runtime_error("Invalid BSER");
|
||||
}
|
||||
|
||||
return BSERInteger(iss).intValue();
|
||||
}
|
||||
|
||||
std::string BSER::encode() {
|
||||
std::ostringstream oss(std::ios_base::binary);
|
||||
encode(oss);
|
||||
|
||||
std::ostringstream res(std::ios_base::binary);
|
||||
res.write("\x00\x01", 2);
|
||||
|
||||
BSERInteger(oss.str().size()).encode(res);
|
||||
res << oss.str();
|
||||
return res.str();
|
||||
}
|
||||
69 lars/node_modules/@parcel/watcher/src/watchman/BSER.hh generated vendored Normal file
@@ -0,0 +1,69 @@
#ifndef BSER_H
#define BSER_H

#include <string>
#include <sstream>
#include <vector>
#include <unordered_map>
#include <memory>

enum BSERType {
  BSER_ARRAY = 0x00,
  BSER_OBJECT = 0x01,
  BSER_STRING = 0x02,
  BSER_INT8 = 0x03,
  BSER_INT16 = 0x04,
  BSER_INT32 = 0x05,
  BSER_INT64 = 0x06,
  BSER_REAL = 0x07,
  BSER_BOOL_TRUE = 0x08,
  BSER_BOOL_FALSE = 0x09,
  BSER_NULL = 0x0a,
  BSER_TEMPLATE = 0x0b
};

class BSERValue;

class BSER {
public:
  typedef std::vector<BSER> Array;
  typedef std::unordered_map<std::string, BSER> Object;

  BSER();
  BSER(BSER::Array value);
  BSER(BSER::Object value);
  BSER(std::string value);
  BSER(const char *value);
  BSER(int64_t value);
  BSER(double value);
  BSER(bool value);
  BSER(std::istream &iss);

  BSER::Array arrayValue();
  BSER::Object objectValue();
  std::string stringValue();
  int64_t intValue();
  double doubleValue();
  bool boolValue();
  void encode(std::ostream &oss);

  static int64_t decodeLength(std::istream &iss);
  std::string encode();
private:
  std::shared_ptr<BSERValue> m_ptr;
};

class BSERValue {
protected:
  friend class BSER;
  virtual BSER::Array arrayValue() { return BSER::Array(); }
  virtual BSER::Object objectValue() { return BSER::Object(); }
  virtual std::string stringValue() { return std::string(); }
  virtual int64_t intValue() { return 0; }
  virtual double doubleValue() { return 0; }
  virtual bool boolValue() { return false; }
  virtual void encode(std::ostream &oss) {}
  virtual ~BSERValue() {}
};

#endif
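The BSER.cc/BSER.hh pair above implements watchman's binary serialization format: a `"\x00\x01"` PDU marker, a length integer, then a single typed value. As a rough illustration of how this class is meant to be used, here is a minimal round-trip sketch. It is not part of the vendored sources; the include path and the `main` wrapper are assumptions made only to keep the example self-contained.

```cpp
#include <iostream>
#include <sstream>
#include "BSER.hh"  // assumed to be compiled alongside src/watchman/BSER.cc

int main() {
  // Build a watchman-style command ["watch-list"] as a BSER array.
  BSER::Array cmd;
  cmd.push_back("watch-list");

  // encode() prepends the "\x00\x01" PDU marker and the payload length.
  std::string pdu = BSER(cmd).encode();

  // Decoding mirrors this: validate the marker and read the length,
  // then parse a single BSER value from the remaining bytes.
  std::istringstream iss(pdu, std::ios_base::binary);
  int64_t payloadLength = BSER::decodeLength(iss);
  BSER decoded(iss);

  std::cout << "payload bytes: " << payloadLength
            << ", first element: " << decoded.arrayValue()[0].stringValue()
            << std::endl;
  return 0;
}
```

The same encode/length-decode sequence is what `watchmanRequest` and `readBSER` in WatchmanBackend.cc below perform against the real watchman socket.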
175 lars/node_modules/@parcel/watcher/src/watchman/IPC.hh generated vendored Normal file
@@ -0,0 +1,175 @@
|
||||
#ifndef IPC_H
|
||||
#define IPC_H
|
||||
|
||||
#include <string>
|
||||
#include <stdlib.h>
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <winsock2.h>
|
||||
#include <windows.h>
|
||||
#else
|
||||
#include <unistd.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/un.h>
|
||||
#endif
|
||||
|
||||
class IPC {
|
||||
public:
|
||||
IPC(std::string path) {
|
||||
mStopped = false;
|
||||
#ifdef _WIN32
|
    while (true) {
      mPipe = CreateFile(
        path.data(), // pipe name
        GENERIC_READ | GENERIC_WRITE, // read and write access
        0, // no sharing
        NULL, // default security attributes
        OPEN_EXISTING, // opens existing pipe
        FILE_FLAG_OVERLAPPED, // attributes
        NULL // no template file
      );

      if (mPipe != INVALID_HANDLE_VALUE) {
        break;
      }

      if (GetLastError() != ERROR_PIPE_BUSY) {
        throw std::runtime_error("Could not open pipe");
      }

      // Wait for pipe to become available if it is busy
      if (!WaitNamedPipe(path.data(), 30000)) {
        throw std::runtime_error("Error waiting for pipe");
      }
    }

    mReader = CreateEvent(NULL, true, false, NULL);
    mWriter = CreateEvent(NULL, true, false, NULL);
    #else
    struct sockaddr_un addr;
    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    strncpy(addr.sun_path, path.c_str(), sizeof(addr.sun_path) - 1);

    mSock = socket(AF_UNIX, SOCK_STREAM, 0);
    if (connect(mSock, (struct sockaddr *) &addr, sizeof(struct sockaddr_un))) {
      throw std::runtime_error("Error connecting to socket");
    }
    #endif
  }

  ~IPC() {
    mStopped = true;
    #ifdef _WIN32
    CancelIo(mPipe);
    CloseHandle(mPipe);
    CloseHandle(mReader);
    CloseHandle(mWriter);
    #else
    shutdown(mSock, SHUT_RDWR);
    #endif
  }

  void write(std::string buf) {
    #ifdef _WIN32
    OVERLAPPED overlapped;
    overlapped.hEvent = mWriter;
    bool success = WriteFile(
      mPipe, // pipe handle
      buf.data(), // message
      buf.size(), // message length
      NULL, // bytes written
      &overlapped // overlapped
    );

    if (mStopped) {
      return;
    }

    if (!success) {
      if (GetLastError() != ERROR_IO_PENDING) {
        throw std::runtime_error("Write error");
      }
    }

    DWORD written;
    success = GetOverlappedResult(mPipe, &overlapped, &written, true);
    if (!success) {
      throw std::runtime_error("GetOverlappedResult failed");
    }

    if (written != buf.size()) {
      throw std::runtime_error("Wrong number of bytes written");
    }
    #else
    int r = 0;
    for (unsigned int i = 0; i != buf.size(); i += r) {
      r = ::write(mSock, &buf[i], buf.size() - i);
      if (r == -1) {
        if (errno == EAGAIN) {
          r = 0;
        } else if (mStopped) {
          return;
        } else {
          throw std::runtime_error("Write error");
        }
      }
    }
    #endif
  }

  int read(char *buf, size_t len) {
    #ifdef _WIN32
    OVERLAPPED overlapped;
    overlapped.hEvent = mReader;
    bool success = ReadFile(
      mPipe, // pipe handle
      buf, // buffer to receive reply
      len, // size of buffer
      NULL, // number of bytes read
      &overlapped // overlapped
    );

    if (!success && !mStopped) {
      if (GetLastError() != ERROR_IO_PENDING) {
        throw std::runtime_error("Read error");
      }
    }

    DWORD read = 0;
    success = GetOverlappedResult(mPipe, &overlapped, &read, true);
    if (!success && !mStopped) {
      throw std::runtime_error("GetOverlappedResult failed");
    }

    return read;
    #else
    int r = ::read(mSock, buf, len);
    if (r == 0 && !mStopped) {
      throw std::runtime_error("Socket ended unexpectedly");
    }

    if (r < 0) {
      if (mStopped) {
        return 0;
      }

      throw std::runtime_error(strerror(errno));
    }

    return r;
    #endif
  }

private:
  bool mStopped;
  #ifdef _WIN32
  HANDLE mPipe;
  HANDLE mReader;
  HANDLE mWriter;
  #else
  int mSock;
  #endif
};

#endif
338  lars/node_modules/@parcel/watcher/src/watchman/WatchmanBackend.cc  (generated, vendored, Normal file)
@@ -0,0 +1,338 @@
|
||||
#include <string>
|
||||
#include <fstream>
|
||||
#include <stdlib.h>
|
||||
#include <algorithm>
|
||||
#include "../DirTree.hh"
|
||||
#include "../Event.hh"
|
||||
#include "./BSER.hh"
|
||||
#include "./WatchmanBackend.hh"
|
||||
|
||||
#ifdef _WIN32
|
||||
#include "../windows/win_utils.hh"
|
||||
#define S_ISDIR(mode) ((mode & _S_IFDIR) == _S_IFDIR)
|
||||
#define popen _popen
|
||||
#define pclose _pclose
|
||||
#else
|
||||
#include <sys/stat.h>
|
||||
#define normalizePath(dir) dir
|
||||
#endif
|
||||
|
||||
template<typename T>
|
||||
BSER readBSER(T &&do_read) {
|
||||
std::stringstream oss;
|
||||
char buffer[256];
|
||||
int r;
|
||||
int64_t len = -1;
|
||||
do {
|
||||
// Start by reading a minimal amount of data in order to decode the length.
|
||||
// After that, attempt to read the remaining length, up to the buffer size.
|
||||
r = do_read(buffer, len == -1 ? 20 : (len < 256 ? len : 256));
|
||||
oss << std::string(buffer, r);
|
||||
|
||||
if (len == -1) {
|
||||
uint64_t l = BSER::decodeLength(oss);
|
||||
len = l + oss.tellg();
|
||||
}
|
||||
|
||||
len -= r;
|
||||
} while (len > 0);
|
||||
|
||||
return BSER(oss);
|
||||
}
|
||||
|
||||
std::string getSockPath() {
|
||||
auto var = getenv("WATCHMAN_SOCK");
|
||||
if (var && *var) {
|
||||
return std::string(var);
|
||||
}
|
||||
|
||||
FILE *fp = popen("watchman --output-encoding=bser get-sockname", "r");
|
||||
if (fp == NULL || errno == ECHILD) {
|
||||
throw std::runtime_error("Failed to execute watchman");
|
||||
}
|
||||
|
||||
BSER b = readBSER([fp] (char *buf, size_t len) {
|
||||
return fread(buf, sizeof(char), len, fp);
|
||||
});
|
||||
|
||||
pclose(fp);
|
||||
|
||||
auto objValue = b.objectValue();
|
||||
auto foundSockname = objValue.find("sockname");
|
||||
if (foundSockname == objValue.end()) {
|
||||
throw std::runtime_error("sockname not found");
|
||||
}
|
||||
return foundSockname->second.stringValue();
|
||||
}
|
||||
|
||||
std::unique_ptr<IPC> watchmanConnect() {
|
||||
std::string path = getSockPath();
|
||||
return std::unique_ptr<IPC>(new IPC(path));
|
||||
}
|
||||
|
||||
BSER watchmanRead(IPC *ipc) {
|
||||
return readBSER([ipc] (char *buf, size_t len) {
|
||||
return ipc->read(buf, len);
|
||||
});
|
||||
}
|
||||
|
||||
BSER::Object WatchmanBackend::watchmanRequest(BSER b) {
|
||||
std::string cmd = b.encode();
|
||||
mIPC->write(cmd);
|
||||
mRequestSignal.notify();
|
||||
|
||||
mResponseSignal.wait();
|
||||
mResponseSignal.reset();
|
||||
|
||||
if (!mError.empty()) {
|
||||
std::runtime_error err = std::runtime_error(mError);
|
||||
mError = std::string();
|
||||
throw err;
|
||||
}
|
||||
|
||||
return mResponse;
|
||||
}
|
||||
|
||||
void WatchmanBackend::watchmanWatch(std::string dir) {
|
||||
std::vector<BSER> cmd;
|
||||
cmd.push_back("watch");
|
||||
cmd.push_back(normalizePath(dir));
|
||||
watchmanRequest(cmd);
|
||||
}
|
||||
|
||||
bool WatchmanBackend::checkAvailable() {
|
||||
try {
|
||||
watchmanConnect();
|
||||
return true;
|
||||
} catch (std::exception &err) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
void handleFiles(WatcherRef watcher, BSER::Object obj) {
|
||||
auto found = obj.find("files");
|
||||
if (found == obj.end()) {
|
||||
throw WatcherError("Error reading changes from watchman", watcher);
|
||||
}
|
||||
|
||||
auto files = found->second.arrayValue();
|
||||
for (auto it = files.begin(); it != files.end(); it++) {
|
||||
auto file = it->objectValue();
|
||||
auto name = file.find("name")->second.stringValue();
|
||||
#ifdef _WIN32
|
||||
std::replace(name.begin(), name.end(), '/', '\\');
|
||||
#endif
|
||||
auto mode = file.find("mode")->second.intValue();
|
||||
auto isNew = file.find("new")->second.boolValue();
|
||||
auto exists = file.find("exists")->second.boolValue();
|
||||
auto path = watcher->mDir + DIR_SEP + name;
|
||||
if (watcher->isIgnored(path)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (isNew && exists) {
|
||||
watcher->mEvents.create(path);
|
||||
} else if (exists && !S_ISDIR(mode)) {
|
||||
watcher->mEvents.update(path);
|
||||
} else if (!isNew && !exists) {
|
||||
watcher->mEvents.remove(path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void WatchmanBackend::handleSubscription(BSER::Object obj) {
|
||||
std::unique_lock<std::mutex> lock(mMutex);
|
||||
auto subscription = obj.find("subscription")->second.stringValue();
|
||||
auto it = mSubscriptions.find(subscription);
|
||||
if (it == mSubscriptions.end()) {
|
||||
return;
|
||||
}
|
||||
|
||||
auto watcher = it->second;
|
||||
try {
|
||||
handleFiles(watcher, obj);
|
||||
watcher->notify();
|
||||
} catch (WatcherError &err) {
|
||||
handleWatcherError(err);
|
||||
}
|
||||
}
|
||||
|
||||
void WatchmanBackend::start() {
|
||||
mIPC = watchmanConnect();
|
||||
notifyStarted();
|
||||
|
||||
while (true) {
|
||||
// If there are no subscriptions we are reading, wait for a request.
|
||||
if (mSubscriptions.size() == 0) {
|
||||
mRequestSignal.wait();
|
||||
mRequestSignal.reset();
|
||||
}
|
||||
|
||||
// Break out of loop if we are stopped.
|
||||
if (mStopped) {
|
||||
break;
|
||||
}
|
||||
|
||||
// Attempt to read from the socket.
|
||||
// If there is an error and we are stopped, break.
|
||||
BSER b;
|
||||
try {
|
||||
b = watchmanRead(&*mIPC);
|
||||
} catch (std::exception &err) {
|
||||
if (mStopped) {
|
||||
break;
|
||||
} else if (mResponseSignal.isWaiting()) {
|
||||
mError = err.what();
|
||||
mResponseSignal.notify();
|
||||
} else {
|
||||
// Throwing causes the backend to be destroyed, but we never reach the code below to notify the signal
|
||||
mEndedSignal.notify();
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
auto obj = b.objectValue();
|
||||
auto error = obj.find("error");
|
||||
if (error != obj.end()) {
|
||||
mError = error->second.stringValue();
|
||||
mResponseSignal.notify();
|
||||
continue;
|
||||
}
|
||||
|
||||
// If this message is for a subscription, handle it, otherwise notify the request.
|
||||
auto subscription = obj.find("subscription");
|
||||
if (subscription != obj.end()) {
|
||||
handleSubscription(obj);
|
||||
} else {
|
||||
mResponse = obj;
|
||||
mResponseSignal.notify();
|
||||
}
|
||||
}
|
||||
|
||||
mEndedSignal.notify();
|
||||
}
|
||||
|
||||
WatchmanBackend::~WatchmanBackend() {
|
||||
// Mark the watcher as stopped, close the socket, and trigger the lock.
|
||||
// This will cause the read loop to be broken and the thread to exit.
|
||||
mStopped = true;
|
||||
mIPC.reset();
|
||||
mRequestSignal.notify();
|
||||
|
||||
// If not ended yet, wait.
|
||||
mEndedSignal.wait();
|
||||
}
|
||||
|
||||
std::string WatchmanBackend::clock(WatcherRef watcher) {
|
||||
BSER::Array cmd;
|
||||
cmd.push_back("clock");
|
||||
cmd.push_back(normalizePath(watcher->mDir));
|
||||
|
||||
BSER::Object obj = watchmanRequest(cmd);
|
||||
auto found = obj.find("clock");
|
||||
if (found == obj.end()) {
|
||||
throw WatcherError("Error reading clock from watchman", watcher);
|
||||
}
|
||||
|
||||
return found->second.stringValue();
|
||||
}
|
||||
|
||||
void WatchmanBackend::writeSnapshot(WatcherRef watcher, std::string *snapshotPath) {
|
||||
std::unique_lock<std::mutex> lock(mMutex);
|
||||
watchmanWatch(watcher->mDir);
|
||||
|
||||
std::ofstream ofs(*snapshotPath);
|
||||
ofs << clock(watcher);
|
||||
}
|
||||
|
||||
void WatchmanBackend::getEventsSince(WatcherRef watcher, std::string *snapshotPath) {
|
||||
std::unique_lock<std::mutex> lock(mMutex);
|
||||
std::ifstream ifs(*snapshotPath);
|
||||
if (ifs.fail()) {
|
||||
return;
|
||||
}
|
||||
|
||||
watchmanWatch(watcher->mDir);
|
||||
|
||||
std::string clock;
|
||||
ifs >> clock;
|
||||
|
||||
BSER::Array cmd;
|
||||
cmd.push_back("since");
|
||||
cmd.push_back(normalizePath(watcher->mDir));
|
||||
cmd.push_back(clock);
|
||||
|
||||
BSER::Object obj = watchmanRequest(cmd);
|
||||
handleFiles(watcher, obj);
|
||||
}
|
||||
|
||||
std::string getId(WatcherRef watcher) {
|
||||
std::ostringstream id;
|
||||
id << "parcel-";
|
||||
id << static_cast<void*>(watcher.get());
|
||||
return id.str();
|
||||
}
|
||||
|
||||
// This function is called by Backend::watch which takes a lock on mMutex
|
||||
void WatchmanBackend::subscribe(WatcherRef watcher) {
|
||||
watchmanWatch(watcher->mDir);
|
||||
|
||||
std::string id = getId(watcher);
|
||||
BSER::Array cmd;
|
||||
cmd.push_back("subscribe");
|
||||
cmd.push_back(normalizePath(watcher->mDir));
|
||||
cmd.push_back(id);
|
||||
|
||||
BSER::Array fields;
|
||||
fields.push_back("name");
|
||||
fields.push_back("mode");
|
||||
fields.push_back("exists");
|
||||
fields.push_back("new");
|
||||
|
||||
BSER::Object opts;
|
||||
opts.emplace("fields", fields);
|
||||
opts.emplace("since", clock(watcher));
|
||||
|
||||
if (watcher->mIgnorePaths.size() > 0) {
|
||||
BSER::Array ignore;
|
||||
BSER::Array anyOf;
|
||||
anyOf.push_back("anyof");
|
||||
|
||||
for (auto it = watcher->mIgnorePaths.begin(); it != watcher->mIgnorePaths.end(); it++) {
|
||||
std::string pathStart = watcher->mDir + DIR_SEP;
|
||||
if (it->rfind(pathStart, 0) == 0) {
|
||||
auto relative = it->substr(pathStart.size());
|
||||
BSER::Array dirname;
|
||||
dirname.push_back("dirname");
|
||||
dirname.push_back(relative);
|
||||
anyOf.push_back(dirname);
|
||||
}
|
||||
}
|
||||
|
||||
ignore.push_back("not");
|
||||
ignore.push_back(anyOf);
|
||||
|
||||
opts.emplace("expression", ignore);
|
||||
}
|
||||
|
||||
cmd.push_back(opts);
|
||||
watchmanRequest(cmd);
|
||||
|
||||
mSubscriptions.emplace(id, watcher);
|
||||
mRequestSignal.notify();
|
||||
}
|
||||
|
||||
// This function is called by Backend::unwatch which takes a lock on mMutex
|
||||
void WatchmanBackend::unsubscribe(WatcherRef watcher) {
|
||||
std::string id = getId(watcher);
|
||||
auto erased = mSubscriptions.erase(id);
|
||||
|
||||
if (erased) {
|
||||
BSER::Array cmd;
|
||||
cmd.push_back("unsubscribe");
|
||||
cmd.push_back(normalizePath(watcher->mDir));
|
||||
cmd.push_back(id);
|
||||
|
||||
watchmanRequest(cmd);
|
||||
}
|
||||
}
|
||||
35  lars/node_modules/@parcel/watcher/src/watchman/WatchmanBackend.hh  (generated, vendored, Normal file)
@@ -0,0 +1,35 @@
#ifndef WATCHMAN_H
#define WATCHMAN_H

#include "../Backend.hh"
#include "./BSER.hh"
#include "../Signal.hh"
#include "./IPC.hh"

class WatchmanBackend : public Backend {
public:
  static bool checkAvailable();
  void start() override;
  WatchmanBackend() : mStopped(false) {};
  ~WatchmanBackend();
  void writeSnapshot(WatcherRef watcher, std::string *snapshotPath) override;
  void getEventsSince(WatcherRef watcher, std::string *snapshotPath) override;
  void subscribe(WatcherRef watcher) override;
  void unsubscribe(WatcherRef watcher) override;
private:
  std::unique_ptr<IPC> mIPC;
  Signal mRequestSignal;
  Signal mResponseSignal;
  BSER::Object mResponse;
  std::string mError;
  std::unordered_map<std::string, WatcherRef> mSubscriptions;
  bool mStopped;
  Signal mEndedSignal;

  std::string clock(WatcherRef watcher);
  void watchmanWatch(std::string dir);
  BSER::Object watchmanRequest(BSER cmd);
  void handleSubscription(BSER::Object obj);
};

#endif
282  lars/node_modules/@parcel/watcher/src/windows/WindowsBackend.cc  (generated, vendored, Normal file)
@@ -0,0 +1,282 @@
|
||||
#include <string>
|
||||
#include <stack>
|
||||
#include "../DirTree.hh"
|
||||
#include "../shared/BruteForceBackend.hh"
|
||||
#include "./WindowsBackend.hh"
|
||||
#include "./win_utils.hh"
|
||||
|
||||
#define DEFAULT_BUF_SIZE 1024 * 1024
|
||||
#define NETWORK_BUF_SIZE 64 * 1024
|
||||
#define CONVERT_TIME(ft) ULARGE_INTEGER{ft.dwLowDateTime, ft.dwHighDateTime}.QuadPart
|
||||
|
||||
void BruteForceBackend::readTree(WatcherRef watcher, std::shared_ptr<DirTree> tree) {
|
||||
std::stack<std::string> directories;
|
||||
|
||||
directories.push(watcher->mDir);
|
||||
|
||||
while (!directories.empty()) {
|
||||
HANDLE hFind = INVALID_HANDLE_VALUE;
|
||||
|
||||
std::string path = directories.top();
|
||||
std::string spec = path + "\\*";
|
||||
directories.pop();
|
||||
|
||||
WIN32_FIND_DATA ffd;
|
||||
hFind = FindFirstFile(spec.c_str(), &ffd);
|
||||
|
||||
if (hFind == INVALID_HANDLE_VALUE) {
|
||||
if (path == watcher->mDir) {
|
||||
FindClose(hFind);
|
||||
throw WatcherError("Error opening directory", watcher);
|
||||
}
|
||||
|
||||
tree->remove(path);
|
||||
continue;
|
||||
}
|
||||
|
||||
do {
|
||||
if (strcmp(ffd.cFileName, ".") != 0 && strcmp(ffd.cFileName, "..") != 0) {
|
||||
std::string fullPath = path + "\\" + ffd.cFileName;
|
||||
if (watcher->isIgnored(fullPath)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
tree->add(fullPath, CONVERT_TIME(ffd.ftLastWriteTime), ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);
|
||||
if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
|
||||
directories.push(fullPath);
|
||||
}
|
||||
}
|
||||
} while (FindNextFile(hFind, &ffd) != 0);
|
||||
|
||||
FindClose(hFind);
|
||||
}
|
||||
}
|
||||
|
||||
void WindowsBackend::start() {
|
||||
mRunning = true;
|
||||
notifyStarted();
|
||||
|
||||
while (mRunning) {
|
||||
SleepEx(INFINITE, true);
|
||||
}
|
||||
}
|
||||
|
||||
WindowsBackend::~WindowsBackend() {
|
||||
// Mark as stopped, and queue a noop function in the thread to break the loop
|
||||
mRunning = false;
|
||||
QueueUserAPC([](__in ULONG_PTR) {}, mThread.native_handle(), (ULONG_PTR)this);
|
||||
}
|
||||
|
||||
class Subscription: public WatcherState {
|
||||
public:
|
||||
Subscription(WindowsBackend *backend, WatcherRef watcher, std::shared_ptr<DirTree> tree) {
|
||||
mRunning = true;
|
||||
mBackend = backend;
|
||||
mWatcher = watcher;
|
||||
mTree = tree;
|
||||
ZeroMemory(&mOverlapped, sizeof(OVERLAPPED));
|
||||
mOverlapped.hEvent = this;
|
||||
mReadBuffer.resize(DEFAULT_BUF_SIZE);
|
||||
mWriteBuffer.resize(DEFAULT_BUF_SIZE);
|
||||
|
||||
mDirectoryHandle = CreateFileW(
|
||||
utf8ToUtf16(watcher->mDir).data(),
|
||||
FILE_LIST_DIRECTORY,
|
||||
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
|
||||
NULL,
|
||||
OPEN_EXISTING,
|
||||
FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OVERLAPPED,
|
||||
NULL
|
||||
);
|
||||
|
||||
if (mDirectoryHandle == INVALID_HANDLE_VALUE) {
|
||||
throw WatcherError("Invalid handle", mWatcher);
|
||||
}
|
||||
|
||||
// Ensure that the path is a directory
|
||||
BY_HANDLE_FILE_INFORMATION info;
|
||||
bool success = GetFileInformationByHandle(
|
||||
mDirectoryHandle,
|
||||
&info
|
||||
);
|
||||
|
||||
if (!success) {
|
||||
throw WatcherError("Could not get file information", mWatcher);
|
||||
}
|
||||
|
||||
if (!(info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
|
||||
throw WatcherError("Not a directory", mWatcher);
|
||||
}
|
||||
}
|
||||
|
||||
virtual ~Subscription() {
|
||||
stop();
|
||||
}
|
||||
|
||||
void run() {
|
||||
try {
|
||||
poll();
|
||||
} catch (WatcherError &err) {
|
||||
mBackend->handleWatcherError(err);
|
||||
}
|
||||
}
|
||||
|
||||
void stop() {
|
||||
if (mRunning) {
|
||||
mRunning = false;
|
||||
CancelIo(mDirectoryHandle);
|
||||
CloseHandle(mDirectoryHandle);
|
||||
}
|
||||
}
|
||||
|
||||
void poll() {
|
||||
if (!mRunning) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Asynchronously wait for changes.
|
||||
int success = ReadDirectoryChangesW(
|
||||
mDirectoryHandle,
|
||||
mWriteBuffer.data(),
|
||||
static_cast<DWORD>(mWriteBuffer.size()),
|
||||
TRUE, // recursive
|
||||
FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME | FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||
| FILE_NOTIFY_CHANGE_SIZE | FILE_NOTIFY_CHANGE_LAST_WRITE,
|
||||
NULL,
|
||||
&mOverlapped,
|
||||
[](DWORD errorCode, DWORD numBytes, LPOVERLAPPED overlapped) {
|
||||
auto subscription = reinterpret_cast<Subscription *>(overlapped->hEvent);
|
||||
try {
|
||||
subscription->processEvents(errorCode);
|
||||
} catch (WatcherError &err) {
|
||||
subscription->mBackend->handleWatcherError(err);
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
if (!success) {
|
||||
throw WatcherError("Failed to read changes", mWatcher);
|
||||
}
|
||||
}
|
||||
|
||||
void processEvents(DWORD errorCode) {
|
||||
if (!mRunning) {
|
||||
return;
|
||||
}
|
||||
|
||||
switch (errorCode) {
|
||||
case ERROR_OPERATION_ABORTED:
|
||||
return;
|
||||
case ERROR_INVALID_PARAMETER:
|
||||
// resize buffers to network size (64kb), and try again
|
||||
mReadBuffer.resize(NETWORK_BUF_SIZE);
|
||||
mWriteBuffer.resize(NETWORK_BUF_SIZE);
|
||||
poll();
|
||||
return;
|
||||
case ERROR_NOTIFY_ENUM_DIR:
|
||||
throw WatcherError("Buffer overflow. Some events may have been lost.", mWatcher);
|
||||
case ERROR_ACCESS_DENIED: {
|
||||
// This can happen if the watched directory is deleted. Check if that is the case,
|
||||
// and if so emit a delete event. Otherwise, fall through to default error case.
|
||||
DWORD attrs = GetFileAttributesW(utf8ToUtf16(mWatcher->mDir).data());
|
||||
bool isDir = attrs != INVALID_FILE_ATTRIBUTES && (attrs & FILE_ATTRIBUTE_DIRECTORY);
|
||||
if (!isDir) {
|
||||
mWatcher->mEvents.remove(mWatcher->mDir);
|
||||
mTree->remove(mWatcher->mDir);
|
||||
mWatcher->notify();
|
||||
stop();
|
||||
return;
|
||||
}
|
||||
}
|
||||
default:
|
||||
if (errorCode != ERROR_SUCCESS) {
|
||||
throw WatcherError("Unknown error", mWatcher);
|
||||
}
|
||||
}
|
||||
|
||||
// Swap read and write buffers, and poll again
|
||||
std::swap(mWriteBuffer, mReadBuffer);
|
||||
poll();
|
||||
|
||||
// Read change events
|
||||
BYTE *base = mReadBuffer.data();
|
||||
while (true) {
|
||||
PFILE_NOTIFY_INFORMATION info = (PFILE_NOTIFY_INFORMATION)base;
|
||||
processEvent(info);
|
||||
|
||||
if (info->NextEntryOffset == 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
base += info->NextEntryOffset;
|
||||
}
|
||||
|
||||
mWatcher->notify();
|
||||
}
|
||||
|
||||
void processEvent(PFILE_NOTIFY_INFORMATION info) {
|
||||
std::string path = mWatcher->mDir + "\\" + utf16ToUtf8(info->FileName, info->FileNameLength / sizeof(WCHAR));
|
||||
if (mWatcher->isIgnored(path)) {
|
||||
return;
|
||||
}
|
||||
|
||||
switch (info->Action) {
|
||||
case FILE_ACTION_ADDED:
|
||||
case FILE_ACTION_RENAMED_NEW_NAME: {
|
||||
WIN32_FILE_ATTRIBUTE_DATA data;
|
||||
if (GetFileAttributesExW(utf8ToUtf16(path).data(), GetFileExInfoStandard, &data)) {
|
||||
mWatcher->mEvents.create(path);
|
||||
mTree->add(path, CONVERT_TIME(data.ftLastWriteTime), data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case FILE_ACTION_MODIFIED: {
|
||||
WIN32_FILE_ATTRIBUTE_DATA data;
|
||||
if (GetFileAttributesExW(utf8ToUtf16(path).data(), GetFileExInfoStandard, &data)) {
|
||||
mTree->update(path, CONVERT_TIME(data.ftLastWriteTime));
|
||||
if (!(data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
|
||||
mWatcher->mEvents.update(path);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case FILE_ACTION_REMOVED:
|
||||
case FILE_ACTION_RENAMED_OLD_NAME:
|
||||
mWatcher->mEvents.remove(path);
|
||||
mTree->remove(path);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
WindowsBackend *mBackend;
|
||||
std::shared_ptr<Watcher> mWatcher;
|
||||
std::shared_ptr<DirTree> mTree;
|
||||
bool mRunning;
|
||||
HANDLE mDirectoryHandle;
|
||||
std::vector<BYTE> mReadBuffer;
|
||||
std::vector<BYTE> mWriteBuffer;
|
||||
OVERLAPPED mOverlapped;
|
||||
};
|
||||
|
||||
// This function is called by Backend::watch which takes a lock on mMutex
|
||||
void WindowsBackend::subscribe(WatcherRef watcher) {
|
||||
// Create a subscription for this watcher
|
||||
auto sub = std::make_shared<Subscription>(this, watcher, getTree(watcher, false));
|
||||
watcher->state = sub;
|
||||
|
||||
// Queue polling for this subscription in the correct thread.
|
||||
bool success = QueueUserAPC([](__in ULONG_PTR ptr) {
|
||||
Subscription *sub = (Subscription *)ptr;
|
||||
sub->run();
|
||||
}, mThread.native_handle(), (ULONG_PTR)sub.get());
|
||||
|
||||
if (!success) {
|
||||
throw std::runtime_error("Unable to queue APC");
|
||||
}
|
||||
}
|
||||
|
||||
// This function is called by Backend::unwatch which takes a lock on mMutex
|
||||
void WindowsBackend::unsubscribe(WatcherRef watcher) {
|
||||
watcher->state = nullptr;
|
||||
}
|
||||
18  lars/node_modules/@parcel/watcher/src/windows/WindowsBackend.hh  (generated, vendored, Normal file)
@@ -0,0 +1,18 @@
#ifndef WINDOWS_H
#define WINDOWS_H

#include <winsock2.h>
#include <windows.h>
#include "../shared/BruteForceBackend.hh"

class WindowsBackend : public BruteForceBackend {
public:
  void start() override;
  ~WindowsBackend();
  void subscribe(WatcherRef watcher) override;
  void unsubscribe(WatcherRef watcher) override;
private:
  bool mRunning;
};

#endif
44  lars/node_modules/@parcel/watcher/src/windows/win_utils.cc  (generated, vendored, Normal file)
@@ -0,0 +1,44 @@
#include "./win_utils.hh"

std::wstring utf8ToUtf16(std::string input) {
  unsigned int len = MultiByteToWideChar(CP_UTF8, 0, input.c_str(), -1, NULL, 0);
  WCHAR *output = new WCHAR[len];
  MultiByteToWideChar(CP_UTF8, 0, input.c_str(), -1, output, len);
  std::wstring res(output);
  delete output;
  return res;
}

std::string utf16ToUtf8(const WCHAR *input, size_t length) {
  unsigned int len = WideCharToMultiByte(CP_UTF8, 0, input, length, NULL, 0, NULL, NULL);
  char *output = new char[len + 1];
  WideCharToMultiByte(CP_UTF8, 0, input, length, output, len, NULL, NULL);
  output[len] = '\0';
  std::string res(output);
  delete output;
  return res;
}

std::string normalizePath(std::string path) {
  // Prevent truncation to MAX_PATH characters by adding the \\?\ prefix
  std::wstring p = utf8ToUtf16("\\\\?\\" + path);

  // Get the required length for the output
  unsigned int len = GetLongPathNameW(p.data(), NULL, 0);
  if (!len) {
    return path;
  }

  // Allocate output array and get long path
  WCHAR *output = new WCHAR[len];
  len = GetLongPathNameW(p.data(), output, len);
  if (!len) {
    delete output;
    return path;
  }

  // Convert back to utf8
  std::string res = utf16ToUtf8(output + 4, len - 4);
  delete output;
  return res;
}
11  lars/node_modules/@parcel/watcher/src/windows/win_utils.hh  (generated, vendored, Normal file)
@@ -0,0 +1,11 @@
#ifndef WIN_UTILS_H
#define WIN_UTILS_H

#include <string>
#include <windows.h>

std::wstring utf8ToUtf16(std::string input);
std::string utf16ToUtf8(const WCHAR *input, size_t length);
std::string normalizePath(std::string path);

#endif
77  lars/node_modules/@parcel/watcher/wrapper.js  (generated, vendored, Normal file)
@@ -0,0 +1,77 @@
const path = require('path');
const micromatch = require('micromatch');
const isGlob = require('is-glob');

function normalizeOptions(dir, opts = {}) {
  const { ignore, ...rest } = opts;

  if (Array.isArray(ignore)) {
    opts = { ...rest };

    for (const value of ignore) {
      if (isGlob(value)) {
        if (!opts.ignoreGlobs) {
          opts.ignoreGlobs = [];
        }

        const regex = micromatch.makeRe(value, {
          // We set `dot: true` to workaround an issue with the
          // regular expression on Linux where the resulting
          // negative lookahead `(?!(\\/|^)` was never matching
          // in some cases. See also https://bit.ly/3UZlQDm
          dot: true,
          // C++ does not support lookbehind regex patterns, they
          // were only added later to JavaScript engines
          // (https://bit.ly/3V7S6UL)
          lookbehinds: false
        });
        opts.ignoreGlobs.push(regex.source);
      } else {
        if (!opts.ignorePaths) {
          opts.ignorePaths = [];
        }

        opts.ignorePaths.push(path.resolve(dir, value));
      }
    }
  }

  return opts;
}

exports.createWrapper = (binding) => {
  return {
    writeSnapshot(dir, snapshot, opts) {
      return binding.writeSnapshot(
        path.resolve(dir),
        path.resolve(snapshot),
        normalizeOptions(dir, opts),
      );
    },
    getEventsSince(dir, snapshot, opts) {
      return binding.getEventsSince(
        path.resolve(dir),
        path.resolve(snapshot),
        normalizeOptions(dir, opts),
      );
    },
    async subscribe(dir, fn, opts) {
      dir = path.resolve(dir);
      opts = normalizeOptions(dir, opts);
      await binding.subscribe(dir, fn, opts);

      return {
        unsubscribe() {
          return binding.unsubscribe(dir, fn, opts);
        },
      };
    },
    unsubscribe(dir, fn, opts) {
      return binding.unsubscribe(
        path.resolve(dir),
        fn,
        normalizeOptions(dir, opts),
      );
    }
  };
};
21  lars/node_modules/@tailwindcss/cli/LICENSE  (generated, vendored, Normal file)
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) Tailwind Labs, Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
40  lars/node_modules/@tailwindcss/cli/README.md  (generated, vendored, Normal file)
@@ -0,0 +1,40 @@
|
||||
<p align="center">
|
||||
<a href="https://tailwindcss.com" target="_blank">
|
||||
<picture>
|
||||
<source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/tailwindlabs/tailwindcss/HEAD/.github/logo-dark.svg">
|
||||
<source media="(prefers-color-scheme: light)" srcset="https://raw.githubusercontent.com/tailwindlabs/tailwindcss/HEAD/.github/logo-light.svg">
|
||||
<img alt="Tailwind CSS" src="https://raw.githubusercontent.com/tailwindlabs/tailwindcss/HEAD/.github/logo-light.svg" width="350" height="70" style="max-width: 100%;">
|
||||
</picture>
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
A utility-first CSS framework for rapidly building custom user interfaces.
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/tailwindlabs/tailwindcss/actions"><img src="https://img.shields.io/github/actions/workflow/status/tailwindlabs/tailwindcss/ci.yml?branch=next" alt="Build Status"></a>
|
||||
<a href="https://www.npmjs.com/package/tailwindcss"><img src="https://img.shields.io/npm/dt/tailwindcss.svg" alt="Total Downloads"></a>
|
||||
<a href="https://github.com/tailwindcss/tailwindcss/releases"><img src="https://img.shields.io/npm/v/tailwindcss.svg" alt="Latest Release"></a>
|
||||
<a href="https://github.com/tailwindcss/tailwindcss/blob/master/LICENSE"><img src="https://img.shields.io/npm/l/tailwindcss.svg" alt="License"></a>
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
## Documentation
|
||||
|
||||
For full documentation, visit [tailwindcss.com](https://tailwindcss.com).
|
||||
|
||||
## Community
|
||||
|
||||
For help, discussion about best practices, or any other conversation that would benefit from being searchable:
|
||||
|
||||
[Discuss Tailwind CSS on GitHub](https://github.com/tailwindcss/tailwindcss/discussions)
|
||||
|
||||
For chatting with others using the framework:
|
||||
|
||||
[Join the Tailwind CSS Discord Server](https://discord.gg/7NF8GNe)
|
||||
|
||||
## Contributing
|
||||
|
||||
If you're interested in contributing to Tailwind CSS, please read our [contributing docs](https://github.com/tailwindcss/tailwindcss/blob/next/.github/CONTRIBUTING.md) **before submitting a pull request**.
|
||||
10  lars/node_modules/@tailwindcss/cli/dist/index.mjs  (generated, vendored, Executable file)
File diff suppressed because one or more lines are too long
40  lars/node_modules/@tailwindcss/cli/package.json  (generated, vendored, Normal file)
@@ -0,0 +1,40 @@
|
||||
{
|
||||
"name": "@tailwindcss/cli",
|
||||
"version": "4.1.0",
|
||||
"description": "A utility-first CSS framework for rapidly building custom user interfaces.",
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/tailwindlabs/tailwindcss.git",
|
||||
"directory": "packages/@tailwindcss-cli"
|
||||
},
|
||||
"bugs": "https://github.com/tailwindlabs/tailwindcss/issues",
|
||||
"homepage": "https://tailwindcss.com",
|
||||
"bin": {
|
||||
"tailwindcss": "./dist/index.mjs"
|
||||
},
|
||||
"exports": {
|
||||
"./package.json": "./package.json"
|
||||
},
|
||||
"files": [
|
||||
"dist"
|
||||
],
|
||||
"publishConfig": {
|
||||
"provenance": true,
|
||||
"access": "public"
|
||||
},
|
||||
"dependencies": {
|
||||
"@parcel/watcher": "^2.5.1",
|
||||
"enhanced-resolve": "^5.18.1",
|
||||
"mri": "^1.2.0",
|
||||
"picocolors": "^1.1.1",
|
||||
"@tailwindcss/node": "4.1.0",
|
||||
"@tailwindcss/oxide": "4.1.0",
|
||||
"tailwindcss": "4.1.0"
|
||||
},
|
||||
"scripts": {
|
||||
"lint": "tsc --noEmit",
|
||||
"build": "tsup-node",
|
||||
"dev": "pnpm run build -- --watch"
|
||||
}
|
||||
}
|
||||
21  lars/node_modules/@tailwindcss/node/LICENSE  (generated, vendored, Normal file)
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) Tailwind Labs, Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
40  lars/node_modules/@tailwindcss/node/README.md  (generated, vendored, Normal file)
@@ -0,0 +1,40 @@
|
||||
<p align="center">
|
||||
<a href="https://tailwindcss.com" target="_blank">
|
||||
<picture>
|
||||
<source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/tailwindlabs/tailwindcss/HEAD/.github/logo-dark.svg">
|
||||
<source media="(prefers-color-scheme: light)" srcset="https://raw.githubusercontent.com/tailwindlabs/tailwindcss/HEAD/.github/logo-light.svg">
|
||||
<img alt="Tailwind CSS" src="https://raw.githubusercontent.com/tailwindlabs/tailwindcss/HEAD/.github/logo-light.svg" width="350" height="70" style="max-width: 100%;">
|
||||
</picture>
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
A utility-first CSS framework for rapidly building custom user interfaces.
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/tailwindlabs/tailwindcss/actions"><img src="https://img.shields.io/github/actions/workflow/status/tailwindlabs/tailwindcss/ci.yml?branch=next" alt="Build Status"></a>
|
||||
<a href="https://www.npmjs.com/package/tailwindcss"><img src="https://img.shields.io/npm/dt/tailwindcss.svg" alt="Total Downloads"></a>
|
||||
<a href="https://github.com/tailwindcss/tailwindcss/releases"><img src="https://img.shields.io/npm/v/tailwindcss.svg" alt="Latest Release"></a>
|
||||
<a href="https://github.com/tailwindcss/tailwindcss/blob/master/LICENSE"><img src="https://img.shields.io/npm/l/tailwindcss.svg" alt="License"></a>
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
## Documentation
|
||||
|
||||
For full documentation, visit [tailwindcss.com](https://tailwindcss.com).
|
||||
|
||||
## Community
|
||||
|
||||
For help, discussion about best practices, or any other conversation that would benefit from being searchable:
|
||||
|
||||
[Discuss Tailwind CSS on GitHub](https://github.com/tailwindcss/tailwindcss/discussions)
|
||||
|
||||
For chatting with others using the framework:
|
||||
|
||||
[Join the Tailwind CSS Discord Server](https://discord.gg/7NF8GNe)
|
||||
|
||||
## Contributing
|
||||
|
||||
If you're interested in contributing to Tailwind CSS, please read our [contributing docs](https://github.com/tailwindcss/tailwindcss/blob/next/.github/CONTRIBUTING.md) **before submitting a pull request**.
|
||||
5  lars/node_modules/@tailwindcss/node/dist/esm-cache.loader.d.mts  (generated, vendored, Normal file)
@@ -0,0 +1,5 @@
import { ResolveHook } from 'node:module';

declare let resolve: ResolveHook;

export { resolve };
1  lars/node_modules/@tailwindcss/node/dist/esm-cache.loader.mjs  (generated, vendored, Normal file)
@@ -0,0 +1 @@
|
||||
import{isBuiltin as i}from"node:module";var o=async(a,e,u)=>{let r=await u(a,e);if(r.url===import.meta.url||i(r.url)||!e.parentURL)return r;let t=new URL(e.parentURL).searchParams.get("id");if(t===null)return r;let l=new URL(r.url);return l.searchParams.set("id",t),{...r,url:`${l}`}};export{o as resolve};
|
||||
129  lars/node_modules/@tailwindcss/node/dist/index.d.mts  (generated, vendored, Normal file)
@@ -0,0 +1,129 @@
|
||||
import { Candidate, Variant } from './candidate';
|
||||
import { compileAstNodes } from './compile';
|
||||
import { ClassEntry, VariantEntry } from './intellisense';
|
||||
import { Theme } from './theme';
|
||||
import { Utilities } from './utilities';
|
||||
import { Variants } from './variants';
|
||||
import { Polyfills, Features } from 'tailwindcss';
|
||||
export { Features, Polyfills } from 'tailwindcss';
|
||||
|
||||
declare const DEBUG: boolean;
|
||||
|
||||
declare const env_DEBUG: typeof DEBUG;
|
||||
declare namespace env {
|
||||
export { env_DEBUG as DEBUG };
|
||||
}
|
||||
|
||||
type DesignSystem = {
|
||||
theme: Theme;
|
||||
utilities: Utilities;
|
||||
variants: Variants;
|
||||
invalidCandidates: Set<string>;
|
||||
important: boolean;
|
||||
getClassOrder(classes: string[]): [string, bigint | null][];
|
||||
getClassList(): ClassEntry[];
|
||||
getVariants(): VariantEntry[];
|
||||
parseCandidate(candidate: string): Readonly<Candidate>[];
|
||||
parseVariant(variant: string): Readonly<Variant> | null;
|
||||
compileAstNodes(candidate: Candidate): ReturnType<typeof compileAstNodes>;
|
||||
getVariantOrder(): Map<Variant, number>;
|
||||
resolveThemeValue(path: string, forceInline?: boolean): string | undefined;
|
||||
trackUsedVariables(raw: string): void;
|
||||
candidatesToCss(classes: string[]): (string | null)[];
|
||||
};
|
||||
|
||||
type StyleRule = {
|
||||
kind: 'rule';
|
||||
selector: string;
|
||||
nodes: AstNode[];
|
||||
};
|
||||
type AtRule = {
|
||||
kind: 'at-rule';
|
||||
name: string;
|
||||
params: string;
|
||||
nodes: AstNode[];
|
||||
};
|
||||
type Declaration = {
|
||||
kind: 'declaration';
|
||||
property: string;
|
||||
value: string | undefined;
|
||||
important: boolean;
|
||||
};
|
||||
type Comment = {
|
||||
kind: 'comment';
|
||||
value: string;
|
||||
};
|
||||
type Context = {
|
||||
kind: 'context';
|
||||
context: Record<string, string | boolean>;
|
||||
nodes: AstNode[];
|
||||
};
|
||||
type AtRoot = {
|
||||
kind: 'at-root';
|
||||
nodes: AstNode[];
|
||||
};
|
||||
type AstNode = StyleRule | AtRule | Declaration | Comment | Context | AtRoot;
|
||||
|
||||
type Resolver = (id: string, base: string) => Promise<string | false | undefined>;
|
||||
interface CompileOptions {
|
||||
base: string;
|
||||
onDependency: (path: string) => void;
|
||||
shouldRewriteUrls?: boolean;
|
||||
polyfills?: Polyfills;
|
||||
customCssResolver?: Resolver;
|
||||
customJsResolver?: Resolver;
|
||||
}
|
||||
declare function compileAst(ast: AstNode[], options: CompileOptions): Promise<{
|
||||
sources: {
|
||||
base: string;
|
||||
pattern: string;
|
||||
negated: boolean;
|
||||
}[];
|
||||
root: "none" | {
|
||||
base: string;
|
||||
pattern: string;
|
||||
} | null;
|
||||
features: Features;
|
||||
build(candidates: string[]): AstNode[];
|
||||
}>;
|
||||
declare function compile(css: string, options: CompileOptions): Promise<{
|
||||
sources: {
|
||||
base: string;
|
||||
pattern: string;
|
||||
negated: boolean;
|
||||
}[];
|
||||
root: "none" | {
|
||||
base: string;
|
||||
pattern: string;
|
||||
} | null;
|
||||
features: Features;
|
||||
build(candidates: string[]): string;
|
||||
}>;
|
||||
declare function __unstable__loadDesignSystem(css: string, { base }: {
|
||||
base: string;
|
||||
}): Promise<DesignSystem>;
|
||||
declare function loadModule(id: string, base: string, onDependency: (path: string) => void, customJsResolver?: Resolver): Promise<{
|
||||
base: string;
|
||||
module: any;
|
||||
}>;
|
||||
|
||||
declare class Instrumentation implements Disposable {
|
||||
#private;
|
||||
private defaultFlush;
|
||||
constructor(defaultFlush?: (message: string) => undefined);
|
||||
hit(label: string): void;
|
||||
start(label: string): void;
|
||||
end(label: string): void;
|
||||
reset(): void;
|
||||
report(flush?: (message: string) => undefined): void;
|
||||
[Symbol.dispose](): void;
|
||||
}
|
||||
|
||||
declare function normalizePath(originalPath: string): string;
|
||||
|
||||
declare function optimize(input: string, { file, minify }?: {
|
||||
file?: string;
|
||||
minify?: boolean;
|
||||
}): string;
|
||||
|
||||
export { type CompileOptions, Instrumentation, type Resolver, __unstable__loadDesignSystem, compile, compileAst, env, loadModule, normalizePath, optimize };
|
||||
129  lars/node_modules/@tailwindcss/node/dist/index.d.ts  (generated, vendored, Normal file)
@@ -0,0 +1,129 @@
|
||||
import { Candidate, Variant } from './candidate';
|
||||
import { compileAstNodes } from './compile';
|
||||
import { ClassEntry, VariantEntry } from './intellisense';
|
||||
import { Theme } from './theme';
|
||||
import { Utilities } from './utilities';
|
||||
import { Variants } from './variants';
|
||||
import { Polyfills, Features } from 'tailwindcss';
|
||||
export { Features, Polyfills } from 'tailwindcss';
|
||||
|
||||
declare const DEBUG: boolean;
|
||||
|
||||
declare const env_DEBUG: typeof DEBUG;
|
||||
declare namespace env {
|
||||
export { env_DEBUG as DEBUG };
|
||||
}
|
||||
|
||||
type DesignSystem = {
|
||||
theme: Theme;
|
||||
utilities: Utilities;
|
||||
variants: Variants;
|
||||
invalidCandidates: Set<string>;
|
||||
important: boolean;
|
||||
getClassOrder(classes: string[]): [string, bigint | null][];
|
||||
getClassList(): ClassEntry[];
|
||||
getVariants(): VariantEntry[];
|
||||
parseCandidate(candidate: string): Readonly<Candidate>[];
|
||||
parseVariant(variant: string): Readonly<Variant> | null;
|
||||
compileAstNodes(candidate: Candidate): ReturnType<typeof compileAstNodes>;
|
||||
getVariantOrder(): Map<Variant, number>;
|
||||
resolveThemeValue(path: string, forceInline?: boolean): string | undefined;
|
||||
trackUsedVariables(raw: string): void;
|
||||
candidatesToCss(classes: string[]): (string | null)[];
|
||||
};
|
||||
|
||||
type StyleRule = {
|
||||
kind: 'rule';
|
||||
selector: string;
|
||||
nodes: AstNode[];
|
||||
};
|
||||
type AtRule = {
|
||||
kind: 'at-rule';
|
||||
name: string;
|
||||
params: string;
|
||||
nodes: AstNode[];
|
||||
};
|
||||
type Declaration = {
|
||||
kind: 'declaration';
|
||||
property: string;
|
||||
value: string | undefined;
|
||||
important: boolean;
|
||||
};
|
||||
type Comment = {
|
||||
kind: 'comment';
|
||||
value: string;
|
||||
};
|
||||
type Context = {
|
||||
kind: 'context';
|
||||
context: Record<string, string | boolean>;
|
||||
nodes: AstNode[];
|
||||
};
|
||||
type AtRoot = {
|
||||
kind: 'at-root';
|
||||
nodes: AstNode[];
|
||||
};
|
||||
type AstNode = StyleRule | AtRule | Declaration | Comment | Context | AtRoot;
|
||||
|
||||
type Resolver = (id: string, base: string) => Promise<string | false | undefined>;
|
||||
interface CompileOptions {
|
||||
base: string;
|
||||
onDependency: (path: string) => void;
|
||||
shouldRewriteUrls?: boolean;
|
||||
polyfills?: Polyfills;
|
||||
customCssResolver?: Resolver;
|
||||
customJsResolver?: Resolver;
|
||||
}
|
||||
declare function compileAst(ast: AstNode[], options: CompileOptions): Promise<{
|
||||
sources: {
|
||||
base: string;
|
||||
pattern: string;
|
||||
negated: boolean;
|
||||
}[];
|
||||
root: "none" | {
|
||||
base: string;
|
||||
pattern: string;
|
||||
} | null;
|
||||
features: Features;
|
||||
build(candidates: string[]): AstNode[];
|
||||
}>;
|
||||
declare function compile(css: string, options: CompileOptions): Promise<{
|
||||
sources: {
|
||||
base: string;
|
||||
pattern: string;
|
||||
negated: boolean;
|
||||
}[];
|
||||
root: "none" | {
|
||||
base: string;
|
||||
pattern: string;
|
||||
} | null;
|
||||
features: Features;
|
||||
build(candidates: string[]): string;
|
||||
}>;
|
||||
declare function __unstable__loadDesignSystem(css: string, { base }: {
|
||||
base: string;
|
||||
}): Promise<DesignSystem>;
|
||||
declare function loadModule(id: string, base: string, onDependency: (path: string) => void, customJsResolver?: Resolver): Promise<{
|
||||
base: string;
|
||||
module: any;
|
||||
}>;
|
||||
|
||||
declare class Instrumentation implements Disposable {
|
||||
#private;
|
||||
private defaultFlush;
|
||||
constructor(defaultFlush?: (message: string) => undefined);
|
||||
hit(label: string): void;
|
||||
start(label: string): void;
|
||||
end(label: string): void;
|
||||
reset(): void;
|
||||
report(flush?: (message: string) => undefined): void;
|
||||
[Symbol.dispose](): void;
|
||||
}
|
||||
|
||||
declare function normalizePath(originalPath: string): string;
|
||||
|
||||
declare function optimize(input: string, { file, minify }?: {
|
||||
file?: string;
|
||||
minify?: boolean;
|
||||
}): string;
|
||||
|
||||
export { type CompileOptions, Instrumentation, type Resolver, __unstable__loadDesignSystem, compile, compileAst, env, loadModule, normalizePath, optimize };
|
||||
17  lars/node_modules/@tailwindcss/node/dist/index.js  (generated, vendored, Normal file)
File diff suppressed because one or more lines are too long
17  lars/node_modules/@tailwindcss/node/dist/index.mjs  (generated, vendored, Normal file)
File diff suppressed because one or more lines are too long
3  lars/node_modules/@tailwindcss/node/dist/require-cache.d.ts  (generated, vendored, Normal file)
@@ -0,0 +1,3 @@
declare function clearRequireCache(files: string[]): void;

export { clearRequireCache };
1  lars/node_modules/@tailwindcss/node/dist/require-cache.js  (generated, vendored, Normal file)
@@ -0,0 +1 @@
|
||||
"use strict";var i=Object.defineProperty;var a=Object.getOwnPropertyDescriptor;var f=Object.getOwnPropertyNames;var l=Object.prototype.hasOwnProperty;var n=(r,e)=>{for(var t in e)i(r,t,{get:e[t],enumerable:!0})},u=(r,e,t,o)=>{if(e&&typeof e=="object"||typeof e=="function")for(let c of f(e))!l.call(r,c)&&c!==t&&i(r,c,{get:()=>e[c],enumerable:!(o=a(e,c))||o.enumerable});return r};var h=r=>u(i({},"__esModule",{value:!0}),r);var d={};n(d,{clearRequireCache:()=>q});module.exports=h(d);function q(r){for(let e of r)delete require.cache[e]}0&&(module.exports={clearRequireCache});
|
||||
45  lars/node_modules/@tailwindcss/node/package.json  (generated, vendored, Normal file)
@@ -0,0 +1,45 @@
|
||||
{
|
||||
"name": "@tailwindcss/node",
|
||||
"version": "4.1.0",
|
||||
"description": "A utility-first CSS framework for rapidly building custom user interfaces.",
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/tailwindlabs/tailwindcss.git",
|
||||
"directory": "packages/@tailwindcss-node"
|
||||
},
|
||||
"bugs": "https://github.com/tailwindlabs/tailwindcss/issues",
|
||||
"homepage": "https://tailwindcss.com",
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"publishConfig": {
|
||||
"provenance": true,
|
||||
"access": "public"
|
||||
},
|
||||
"exports": {
|
||||
".": {
|
||||
"types": "./dist/index.d.ts",
|
||||
"import": "./dist/index.mjs",
|
||||
"require": "./dist/index.js"
|
||||
},
|
||||
"./require-cache": {
|
||||
"types": "./dist/require-cache.d.ts",
|
||||
"default": "./dist/require-cache.js"
|
||||
},
|
||||
"./esm-cache-loader": {
|
||||
"types": "./dist/esm-cache.loader.d.mts",
|
||||
"default": "./dist/esm-cache.loader.mjs"
|
||||
}
|
||||
},
|
||||
"dependencies": {
|
||||
"enhanced-resolve": "^5.18.1",
|
||||
"jiti": "^2.4.2",
|
||||
"lightningcss": "1.29.2",
|
||||
"tailwindcss": "4.1.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "tsup-node",
|
||||
"dev": "pnpm run build -- --watch"
|
||||
}
|
||||
}
|
||||
21  lars/node_modules/@tailwindcss/oxide-linux-x64-gnu/LICENSE  (generated, vendored, Normal file)
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) Tailwind Labs, Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
3  lars/node_modules/@tailwindcss/oxide-linux-x64-gnu/README.md  (generated, vendored, Normal file)
@@ -0,0 +1,3 @@
# `@tailwindcss/oxide-linux-x64-gnu`

This is the **x86_64-unknown-linux-gnu** binary for `@tailwindcss/oxide`
30  lars/node_modules/@tailwindcss/oxide-linux-x64-gnu/package.json  (generated, vendored, Normal file)
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"name": "@tailwindcss/oxide-linux-x64-gnu",
|
||||
"version": "4.1.0",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/tailwindlabs/tailwindcss.git",
|
||||
"directory": "crates/node/npm/linux-x64-gnu"
|
||||
},
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"main": "tailwindcss-oxide.linux-x64-gnu.node",
|
||||
"files": [
|
||||
"tailwindcss-oxide.linux-x64-gnu.node"
|
||||
],
|
||||
"publishConfig": {
|
||||
"provenance": true,
|
||||
"access": "public"
|
||||
},
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
},
|
||||
"libc": [
|
||||
"glibc"
|
||||
]
|
||||
}
|
||||
BIN  lars/node_modules/@tailwindcss/oxide-linux-x64-gnu/tailwindcss-oxide.linux-x64-gnu.node  (generated, vendored, Normal file)
Binary file not shown.
21  lars/node_modules/@tailwindcss/oxide/LICENSE  (generated, vendored, Normal file)
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) Tailwind Labs, Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
46  lars/node_modules/@tailwindcss/oxide/index.d.ts  (generated, vendored, Normal file)
@@ -0,0 +1,46 @@
|
||||
/* tslint:disable */
|
||||
/* eslint-disable */
|
||||
|
||||
/* auto-generated by NAPI-RS */
|
||||
|
||||
export interface ChangedContent {
|
||||
/** File path to the changed file */
|
||||
file?: string
|
||||
/** Contents of the changed file */
|
||||
content?: string
|
||||
/** File extension */
|
||||
extension: string
|
||||
}
|
||||
export interface GlobEntry {
|
||||
/** Base path of the glob */
|
||||
base: string
|
||||
/** Glob pattern */
|
||||
pattern: string
|
||||
}
|
||||
export interface SourceEntry {
|
||||
/** Base path of the glob */
|
||||
base: string
|
||||
/** Glob pattern */
|
||||
pattern: string
|
||||
/** Negated flag */
|
||||
negated: boolean
|
||||
}
|
||||
export interface ScannerOptions {
|
||||
/** Glob sources */
|
||||
sources?: Array<SourceEntry>
|
||||
}
|
||||
export interface CandidateWithPosition {
|
||||
/** The candidate string */
|
||||
candidate: string
|
||||
/** The position of the candidate inside the content file */
|
||||
position: number
|
||||
}
|
||||
export declare class Scanner {
|
||||
constructor(opts: ScannerOptions)
|
||||
scan(): Array<string>
|
||||
scanFiles(input: Array<ChangedContent>): Array<string>
|
||||
getCandidatesWithPositions(input: ChangedContent): Array<CandidateWithPosition>
|
||||
get files(): Array<string>
|
||||
get globs(): Array<GlobEntry>
|
||||
get normalizedSources(): Array<GlobEntry>
|
||||
}
|
||||
315  lars/node_modules/@tailwindcss/oxide/index.js  (generated, vendored, Normal file)
@@ -0,0 +1,315 @@
|
||||
/* tslint:disable */
|
||||
/* eslint-disable */
|
||||
/* prettier-ignore */
|
||||
|
||||
/* auto-generated by NAPI-RS */
|
||||
|
||||
const { existsSync, readFileSync } = require('fs')
|
||||
const { join } = require('path')
|
||||
|
||||
const { platform, arch } = process
|
||||
|
||||
let nativeBinding = null
|
||||
let localFileExisted = false
|
||||
let loadError = null
|
||||
|
||||
function isMusl() {
|
||||
// For Node 10
|
||||
if (!process.report || typeof process.report.getReport !== 'function') {
|
||||
try {
|
||||
const lddPath = require('child_process').execSync('which ldd').toString().trim()
|
||||
return readFileSync(lddPath, 'utf8').includes('musl')
|
||||
} catch (e) {
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
const { glibcVersionRuntime } = process.report.getReport().header
|
||||
return !glibcVersionRuntime
|
||||
}
|
||||
}
|
||||
|
||||
switch (platform) {
|
||||
case 'android':
|
||||
switch (arch) {
|
||||
case 'arm64':
|
||||
localFileExisted = existsSync(join(__dirname, 'tailwindcss-oxide.android-arm64.node'))
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.android-arm64.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-android-arm64')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
break
|
||||
case 'arm':
|
||||
localFileExisted = existsSync(join(__dirname, 'tailwindcss-oxide.android-arm-eabi.node'))
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.android-arm-eabi.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-android-arm-eabi')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
break
|
||||
default:
|
||||
throw new Error(`Unsupported architecture on Android ${arch}`)
|
||||
}
|
||||
break
|
||||
case 'win32':
|
||||
switch (arch) {
|
||||
case 'x64':
|
||||
localFileExisted = existsSync(
|
||||
join(__dirname, 'tailwindcss-oxide.win32-x64-msvc.node')
|
||||
)
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.win32-x64-msvc.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-win32-x64-msvc')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
break
|
||||
case 'ia32':
|
||||
localFileExisted = existsSync(
|
||||
join(__dirname, 'tailwindcss-oxide.win32-ia32-msvc.node')
|
||||
)
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.win32-ia32-msvc.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-win32-ia32-msvc')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
break
|
||||
case 'arm64':
|
||||
localFileExisted = existsSync(
|
||||
join(__dirname, 'tailwindcss-oxide.win32-arm64-msvc.node')
|
||||
)
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.win32-arm64-msvc.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-win32-arm64-msvc')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
break
|
||||
default:
|
||||
throw new Error(`Unsupported architecture on Windows: ${arch}`)
|
||||
}
|
||||
break
|
||||
case 'darwin':
|
||||
localFileExisted = existsSync(join(__dirname, 'tailwindcss-oxide.darwin-universal.node'))
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.darwin-universal.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-darwin-universal')
|
||||
}
|
||||
break
|
||||
} catch {}
|
||||
switch (arch) {
|
||||
case 'x64':
|
||||
localFileExisted = existsSync(join(__dirname, 'tailwindcss-oxide.darwin-x64.node'))
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.darwin-x64.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-darwin-x64')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
break
|
||||
case 'arm64':
|
||||
localFileExisted = existsSync(
|
||||
join(__dirname, 'tailwindcss-oxide.darwin-arm64.node')
|
||||
)
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.darwin-arm64.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-darwin-arm64')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
break
|
||||
default:
|
||||
throw new Error(`Unsupported architecture on macOS: ${arch}`)
|
||||
}
|
||||
break
|
||||
case 'freebsd':
|
||||
if (arch !== 'x64') {
|
||||
throw new Error(`Unsupported architecture on FreeBSD: ${arch}`)
|
||||
}
|
||||
localFileExisted = existsSync(join(__dirname, 'tailwindcss-oxide.freebsd-x64.node'))
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.freebsd-x64.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-freebsd-x64')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
break
|
||||
case 'linux':
|
||||
switch (arch) {
|
||||
case 'x64':
|
||||
if (isMusl()) {
|
||||
localFileExisted = existsSync(
|
||||
join(__dirname, 'tailwindcss-oxide.linux-x64-musl.node')
|
||||
)
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.linux-x64-musl.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-linux-x64-musl')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
} else {
|
||||
localFileExisted = existsSync(
|
||||
join(__dirname, 'tailwindcss-oxide.linux-x64-gnu.node')
|
||||
)
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.linux-x64-gnu.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-linux-x64-gnu')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
}
|
||||
break
|
||||
case 'arm64':
|
||||
if (isMusl()) {
|
||||
localFileExisted = existsSync(
|
||||
join(__dirname, 'tailwindcss-oxide.linux-arm64-musl.node')
|
||||
)
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.linux-arm64-musl.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-linux-arm64-musl')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
} else {
|
||||
localFileExisted = existsSync(
|
||||
join(__dirname, 'tailwindcss-oxide.linux-arm64-gnu.node')
|
||||
)
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.linux-arm64-gnu.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-linux-arm64-gnu')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
}
|
||||
break
|
||||
case 'arm':
|
||||
if (isMusl()) {
|
||||
localFileExisted = existsSync(
|
||||
join(__dirname, 'tailwindcss-oxide.linux-arm-musleabihf.node')
|
||||
)
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.linux-arm-musleabihf.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-linux-arm-musleabihf')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
} else {
|
||||
localFileExisted = existsSync(
|
||||
join(__dirname, 'tailwindcss-oxide.linux-arm-gnueabihf.node')
|
||||
)
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.linux-arm-gnueabihf.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-linux-arm-gnueabihf')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
}
|
||||
break
|
||||
case 'riscv64':
|
||||
if (isMusl()) {
|
||||
localFileExisted = existsSync(
|
||||
join(__dirname, 'tailwindcss-oxide.linux-riscv64-musl.node')
|
||||
)
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.linux-riscv64-musl.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-linux-riscv64-musl')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
} else {
|
||||
localFileExisted = existsSync(
|
||||
join(__dirname, 'tailwindcss-oxide.linux-riscv64-gnu.node')
|
||||
)
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.linux-riscv64-gnu.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-linux-riscv64-gnu')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
}
|
||||
break
|
||||
case 's390x':
|
||||
localFileExisted = existsSync(
|
||||
join(__dirname, 'tailwindcss-oxide.linux-s390x-gnu.node')
|
||||
)
|
||||
try {
|
||||
if (localFileExisted) {
|
||||
nativeBinding = require('./tailwindcss-oxide.linux-s390x-gnu.node')
|
||||
} else {
|
||||
nativeBinding = require('@tailwindcss/oxide-linux-s390x-gnu')
|
||||
}
|
||||
} catch (e) {
|
||||
loadError = e
|
||||
}
|
||||
break
|
||||
default:
|
||||
throw new Error(`Unsupported architecture on Linux: ${arch}`)
|
||||
}
|
||||
break
|
||||
default:
|
||||
throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`)
|
||||
}
|
||||
|
||||
if (!nativeBinding) {
|
||||
if (loadError) {
|
||||
throw loadError
|
||||
}
|
||||
throw new Error(`Failed to load native binding`)
|
||||
}
|
||||
|
||||
const { Scanner } = nativeBinding
|
||||
|
||||
module.exports.Scanner = Scanner
|
||||
63
lars/node_modules/@tailwindcss/oxide/package.json
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
{
|
||||
"name": "@tailwindcss/oxide",
|
||||
"version": "4.1.0",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/tailwindlabs/tailwindcss.git",
|
||||
"directory": "crates/node"
|
||||
},
|
||||
"main": "index.js",
|
||||
"types": "index.d.ts",
|
||||
"napi": {
|
||||
"name": "tailwindcss-oxide",
|
||||
"triples": {
|
||||
"additional": [
|
||||
"armv7-linux-androideabi",
|
||||
"aarch64-linux-android",
|
||||
"aarch64-apple-darwin",
|
||||
"aarch64-unknown-linux-gnu",
|
||||
"aarch64-unknown-linux-musl",
|
||||
"armv7-unknown-linux-gnueabihf",
|
||||
"x86_64-unknown-linux-musl",
|
||||
"x86_64-unknown-freebsd",
|
||||
"i686-pc-windows-msvc",
|
||||
"aarch64-pc-windows-msvc"
|
||||
]
|
||||
}
|
||||
},
|
||||
"license": "MIT",
|
||||
"devDependencies": {
|
||||
"@napi-rs/cli": "^2.18.4"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
},
|
||||
"files": [
|
||||
"index.js",
|
||||
"index.d.ts"
|
||||
],
|
||||
"publishConfig": {
|
||||
"provenance": true,
|
||||
"access": "public"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@tailwindcss/oxide-android-arm64": "4.1.0",
|
||||
"@tailwindcss/oxide-darwin-arm64": "4.1.0",
|
||||
"@tailwindcss/oxide-darwin-x64": "4.1.0",
|
||||
"@tailwindcss/oxide-freebsd-x64": "4.1.0",
|
||||
"@tailwindcss/oxide-linux-arm64-gnu": "4.1.0",
|
||||
"@tailwindcss/oxide-linux-arm64-musl": "4.1.0",
|
||||
"@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.0",
|
||||
"@tailwindcss/oxide-linux-x64-gnu": "4.1.0",
|
||||
"@tailwindcss/oxide-win32-x64-msvc": "4.1.0",
|
||||
"@tailwindcss/oxide-linux-x64-musl": "4.1.0",
|
||||
"@tailwindcss/oxide-win32-arm64-msvc": "4.1.0"
|
||||
},
|
||||
"scripts": {
|
||||
"artifacts": "napi artifacts",
|
||||
"build": "napi build --platform --release --no-const-enum",
|
||||
"dev": "cargo watch --quiet --shell 'npm run build'",
|
||||
"build:debug": "napi build --platform --no-const-enum",
|
||||
"version": "napi version"
|
||||
}
|
||||
}
|
||||
21
lars/node_modules/braces/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014-present, Jon Schlinkert.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
586
lars/node_modules/braces/README.md
generated
vendored
Normal file
@@ -0,0 +1,586 @@
|
||||
# braces [](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=W8YFZ425KND68) [](https://www.npmjs.com/package/braces) [](https://npmjs.org/package/braces) [](https://npmjs.org/package/braces) [](https://travis-ci.org/micromatch/braces)
|
||||
|
||||
> Bash-like brace expansion, implemented in JavaScript. Safer than other brace expansion libs, with complete support for the Bash 4.3 braces specification, without sacrificing speed.
|
||||
|
||||
Please consider following this project's author, [Jon Schlinkert](https://github.com/jonschlinkert), and consider starring the project to show your :heart: and support.
|
||||
|
||||
## Install
|
||||
|
||||
Install with [npm](https://www.npmjs.com/):
|
||||
|
||||
```sh
|
||||
$ npm install --save braces
|
||||
```
|
||||
|
||||
## v3.0.0 Released!!
|
||||
|
||||
See the [changelog](CHANGELOG.md) for details.
|
||||
|
||||
## Why use braces?
|
||||
|
||||
Brace patterns make globs more powerful by adding the ability to match specific ranges and sequences of characters.
|
||||
|
||||
- **Accurate** - complete support for the [Bash 4.3 Brace Expansion](https://www.gnu.org/software/bash/) specification (passes all of the Bash braces tests)
|
||||
- **[fast and performant](#benchmarks)** - Starts fast, runs fast and [scales well](#performance) as patterns increase in complexity.
|
||||
- **Organized code base** - The parser and compiler are easy to maintain and update when edge cases crop up.
|
||||
- **Well-tested** - Thousands of test assertions, and passes all of the Bash, minimatch, and [brace-expansion](https://github.com/juliangruber/brace-expansion) unit tests (as of the date this was written).
|
||||
- **Safer** - You shouldn't have to worry about users defining aggressive or malicious brace patterns that can break your application. Braces takes measures to prevent malicious regex that can be used for DDoS attacks (see [catastrophic backtracking](https://www.regular-expressions.info/catastrophic.html)).
|
||||
- [Supports lists](#lists) - (aka "sets") `a/{b,c}/d` => `['a/b/d', 'a/c/d']`
|
||||
- [Supports sequences](#sequences) - (aka "ranges") `{01..03}` => `['01', '02', '03']`
|
||||
- [Supports steps](#steps) - (aka "increments") `{2..10..2}` => `['2', '4', '6', '8', '10']`
|
||||
- [Supports escaping](#escaping) - To prevent evaluation of special characters.
|
||||
|
||||
## Usage
|
||||
|
||||
The main export is a function that takes one or more brace `patterns` and `options`.
|
||||
|
||||
```js
|
||||
const braces = require('braces');
|
||||
// braces(patterns[, options]);
|
||||
|
||||
console.log(braces(['{01..05}', '{a..e}']));
|
||||
//=> ['(0[1-5])', '([a-e])']
|
||||
|
||||
console.log(braces(['{01..05}', '{a..e}'], { expand: true }));
|
||||
//=> ['01', '02', '03', '04', '05', 'a', 'b', 'c', 'd', 'e']
|
||||
```
|
||||
|
||||
### Brace Expansion vs. Compilation
|
||||
|
||||
By default, brace patterns are compiled into strings that are optimized for creating regular expressions and matching.
|
||||
|
||||
**Compiled**
|
||||
|
||||
```js
|
||||
console.log(braces('a/{x,y,z}/b'));
|
||||
//=> ['a/(x|y|z)/b']
|
||||
console.log(braces(['a/{01..20}/b', 'a/{1..5}/b']));
|
||||
//=> [ 'a/(0[1-9]|1[0-9]|20)/b', 'a/([1-5])/b' ]
|
||||
```
|
||||
|
||||
**Expanded**
|
||||
|
||||
Enable brace expansion by setting the `expand` option to true, or by using [braces.expand()](#expand) (returns an array similar to what you'd expect from Bash, or `echo {1..5}`, or [minimatch](https://github.com/isaacs/minimatch)):
|
||||
|
||||
```js
|
||||
console.log(braces('a/{x,y,z}/b', { expand: true }));
|
||||
//=> ['a/x/b', 'a/y/b', 'a/z/b']
|
||||
|
||||
console.log(braces.expand('{01..10}'));
|
||||
//=> ['01','02','03','04','05','06','07','08','09','10']
|
||||
```
|
||||
|
||||
### Lists
|
||||
|
||||
Expand lists (like Bash "sets"):
|
||||
|
||||
```js
|
||||
console.log(braces('a/{foo,bar,baz}/*.js'));
|
||||
//=> ['a/(foo|bar|baz)/*.js']
|
||||
|
||||
console.log(braces.expand('a/{foo,bar,baz}/*.js'));
|
||||
//=> ['a/foo/*.js', 'a/bar/*.js', 'a/baz/*.js']
|
||||
```
|
||||
|
||||
### Sequences
|
||||
|
||||
Expand ranges of characters (like Bash "sequences"):
|
||||
|
||||
```js
|
||||
console.log(braces.expand('{1..3}')); // ['1', '2', '3']
|
||||
console.log(braces.expand('a/{1..3}/b')); // ['a/1/b', 'a/2/b', 'a/3/b']
|
||||
console.log(braces('{a..c}', { expand: true })); // ['a', 'b', 'c']
|
||||
console.log(braces('foo/{a..c}', { expand: true })); // ['foo/a', 'foo/b', 'foo/c']
|
||||
|
||||
// supports zero-padded ranges
|
||||
console.log(braces('a/{01..03}/b')); //=> ['a/(0[1-3])/b']
|
||||
console.log(braces('a/{001..300}/b')); //=> ['a/(0{2}[1-9]|0[1-9][0-9]|[12][0-9]{2}|300)/b']
|
||||
```
|
||||
|
||||
See [fill-range](https://github.com/jonschlinkert/fill-range) for all available range-expansion options.
|
||||
|
||||
### Stepped ranges
|
||||
|
||||
Steps, or increments, may be used with ranges:
|
||||
|
||||
```js
|
||||
console.log(braces.expand('{2..10..2}'));
|
||||
//=> ['2', '4', '6', '8', '10']
|
||||
|
||||
console.log(braces('{2..10..2}'));
|
||||
//=> ['(2|4|6|8|10)']
|
||||
```
|
||||
|
||||
When the [.optimize](#optimize) method is used, or [options.optimize](#optionsoptimize) is set to true, sequences are passed to [to-regex-range](https://github.com/jonschlinkert/to-regex-range) for expansion.
|
||||
|
||||
### Nesting
|
||||
|
||||
Brace patterns may be nested. The results of each expanded string are not sorted, and left to right order is preserved.
|
||||
|
||||
**"Expanded" braces**
|
||||
|
||||
```js
|
||||
console.log(braces.expand('a{b,c,/{x,y}}/e'));
|
||||
//=> ['ab/e', 'ac/e', 'a/x/e', 'a/y/e']
|
||||
|
||||
console.log(braces.expand('a/{x,{1..5},y}/c'));
|
||||
//=> ['a/x/c', 'a/1/c', 'a/2/c', 'a/3/c', 'a/4/c', 'a/5/c', 'a/y/c']
|
||||
```
|
||||
|
||||
**"Optimized" braces**
|
||||
|
||||
```js
|
||||
console.log(braces('a{b,c,/{x,y}}/e'));
|
||||
//=> ['a(b|c|/(x|y))/e']
|
||||
|
||||
console.log(braces('a/{x,{1..5},y}/c'));
|
||||
//=> ['a/(x|([1-5])|y)/c']
|
||||
```
|
||||
|
||||
### Escaping
|
||||
|
||||
**Escaping braces**
|
||||
|
||||
A brace pattern will not be expanded or evaluated if _either the opening or closing brace is escaped_:
|
||||
|
||||
```js
|
||||
console.log(braces.expand('a\\{d,c,b}e'));
|
||||
//=> ['a{d,c,b}e']
|
||||
|
||||
console.log(braces.expand('a{d,c,b\\}e'));
|
||||
//=> ['a{d,c,b}e']
|
||||
```
|
||||
|
||||
**Escaping commas**
|
||||
|
||||
Commas inside braces may also be escaped:
|
||||
|
||||
```js
|
||||
console.log(braces.expand('a{b\\,c}d'));
|
||||
//=> ['a{b,c}d']
|
||||
|
||||
console.log(braces.expand('a{d\\,c,b}e'));
|
||||
//=> ['ad,ce', 'abe']
|
||||
```
|
||||
|
||||
**Single items**
|
||||
|
||||
Following bash conventions, a brace pattern is also not expanded when it contains a single character:
|
||||
|
||||
```js
|
||||
console.log(braces.expand('a{b}c'));
|
||||
//=> ['a{b}c']
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
### options.maxLength
|
||||
|
||||
**Type**: `Number`
|
||||
|
||||
**Default**: `10,000`
|
||||
|
||||
**Description**: Limit the length of the input string. Useful when the input string is generated or your application allows users to pass a string, et cetera.
|
||||
|
||||
```js
|
||||
console.log(braces('a/{b,c}/d', { maxLength: 3 })); //=> throws an error
|
||||
```
|
||||
|
||||
### options.expand
|
||||
|
||||
**Type**: `Boolean`
|
||||
|
||||
**Default**: `undefined`
|
||||
|
||||
**Description**: Generate an "expanded" brace pattern (alternatively you can use the `braces.expand()` method, which does the same thing).
|
||||
|
||||
```js
|
||||
console.log(braces('a/{b,c}/d', { expand: true }));
|
||||
//=> [ 'a/b/d', 'a/c/d' ]
|
||||
```
|
||||
|
||||
### options.nodupes
|
||||
|
||||
**Type**: `Boolean`
|
||||
|
||||
**Default**: `undefined`
|
||||
|
||||
**Description**: Remove duplicates from the returned array.
|
||||
|
||||
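A short sketch of the effect (the pattern here is arbitrary; it simply produces duplicate expansions):

```js
const braces = require('braces');

console.log(braces('a/{x,x,y}/b', { expand: true }));
//=> ['a/x/b', 'a/x/b', 'a/y/b']

console.log(braces('a/{x,x,y}/b', { expand: true, nodupes: true }));
//=> ['a/x/b', 'a/y/b']
```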
### options.rangeLimit
|
||||
|
||||
**Type**: `Number`
|
||||
|
||||
**Default**: `1000`
|
||||
|
||||
**Description**: To prevent malicious patterns from being passed by users, an error is thrown when `braces.expand()` is used or `options.expand` is true and the generated range will exceed the `rangeLimit`.
|
||||
|
||||
You can customize `options.rangeLimit` or set it to `Infinity` to disable this altogether.
|
||||
|
||||
**Examples**
|
||||
|
||||
```js
|
||||
// pattern exceeds the "rangeLimit", so it's optimized automatically
|
||||
console.log(braces.expand('{1..1000}'));
|
||||
//=> ['([1-9]|[1-9][0-9]{1,2}|1000)']
|
||||
|
||||
// pattern does not exceed "rangeLimit", so it's NOT optimized
|
||||
console.log(braces.expand('{1..100}'));
|
||||
//=> ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100']
|
||||
```
|
||||
|
||||
### options.transform
|
||||
|
||||
**Type**: `Function`
|
||||
|
||||
**Default**: `undefined`
|
||||
|
||||
**Description**: Customize range expansion.
|
||||
|
||||
**Example: Transforming non-numeric values**
|
||||
|
||||
```js
|
||||
const alpha = braces.expand('x/{a..e}/y', {
|
||||
transform(value, index) {
|
||||
// When non-numeric values are passed, "value" is a character code.
|
||||
return 'foo/' + String.fromCharCode(value) + '-' + index;
|
||||
},
|
||||
});
|
||||
console.log(alpha);
|
||||
//=> [ 'x/foo/a-0/y', 'x/foo/b-1/y', 'x/foo/c-2/y', 'x/foo/d-3/y', 'x/foo/e-4/y' ]
|
||||
```
|
||||
|
||||
**Example: Transforming numeric values**
|
||||
|
||||
```js
|
||||
const numeric = braces.expand('{1..5}', {
|
||||
transform(value) {
|
||||
// when numeric values are passed, "value" is a number
|
||||
return 'foo/' + value * 2;
|
||||
},
|
||||
});
|
||||
console.log(numeric);
|
||||
//=> [ 'foo/2', 'foo/4', 'foo/6', 'foo/8', 'foo/10' ]
|
||||
```
|
||||
|
||||
### options.quantifiers
|
||||
|
||||
**Type**: `Boolean`
|
||||
|
||||
**Default**: `undefined`
|
||||
|
||||
**Description**: In regular expressions, quantifiers can be used to specify how many times a token can be repeated. For example, `a{1,3}` will match the letter `a` one to three times.
|
||||
|
||||
Unfortunately, regex quantifiers happen to share the same syntax as [Bash lists](#lists).
|
||||
|
||||
The `quantifiers` option tells braces to detect when [regex quantifiers](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/RegExp#quantifiers) are defined in the given pattern, and not to try to expand them as lists.
|
||||
|
||||
**Examples**
|
||||
|
||||
```js
|
||||
const braces = require('braces');
|
||||
console.log(braces('a/b{1,3}/{x,y,z}'));
|
||||
//=> [ 'a/b(1|3)/(x|y|z)' ]
|
||||
console.log(braces('a/b{1,3}/{x,y,z}', { quantifiers: true }));
|
||||
//=> [ 'a/b{1,3}/(x|y|z)' ]
|
||||
console.log(braces('a/b{1,3}/{x,y,z}', { quantifiers: true, expand: true }));
|
||||
//=> [ 'a/b{1,3}/x', 'a/b{1,3}/y', 'a/b{1,3}/z' ]
|
||||
```
|
||||
|
||||
### options.keepEscaping
|
||||
|
||||
**Type**: `Boolean`
|
||||
|
||||
**Default**: `undefined`
|
||||
|
||||
**Description**: Do not strip backslashes that were used for escaping from the result.
|
||||
|
||||
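A brief sketch of the difference (the expected output follows from the description above and the escaping examples earlier, rather than from a documented example):

```js
const braces = require('braces');

// Default: the escaping backslash is stripped from the result.
console.log(braces.expand('a\\{b,c}d'));
//=> ['a{b,c}d']

// keepEscaping: the backslash used for escaping is kept in the result.
console.log(braces.expand('a\\{b,c}d', { keepEscaping: true }));
//=> ['a\\{b,c}d']
```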
## What is "brace expansion"?
|
||||
|
||||
Brace expansion is a type of parameter expansion that was made popular by unix shells for generating lists of strings, as well as regex-like matching when used alongside wildcards (globs).
|
||||
|
||||
In addition to "expansion", braces are also used for matching. In other words:
|
||||
|
||||
- [brace expansion](#brace-expansion) is for generating new lists
|
||||
- [brace matching](#brace-matching) is for filtering existing lists
|
||||
|
||||
<details>
|
||||
<summary><strong>More about brace expansion</strong> (click to expand)</summary>
|
||||
|
||||
There are two main types of brace expansion:
|
||||
|
||||
1. **lists**: which are defined using comma-separated values inside curly braces: `{a,b,c}`
|
||||
2. **sequences**: which are defined using a starting value and an ending value, separated by two dots: `a{1..3}b`. Optionally, a third argument may be passed to define a "step" or increment to use: `a{1..100..10}b`. These are also sometimes referred to as "ranges".
|
||||
|
||||
Here are some example brace patterns to illustrate how they work:
|
||||
|
||||
**Sets**
|
||||
|
||||
```
|
||||
{a,b,c} => a b c
|
||||
{a,b,c}{1,2} => a1 a2 b1 b2 c1 c2
|
||||
```
|
||||
|
||||
**Sequences**
|
||||
|
||||
```
|
||||
{1..9} => 1 2 3 4 5 6 7 8 9
|
||||
{4..-4} => 4 3 2 1 0 -1 -2 -3 -4
|
||||
{1..20..3} => 1 4 7 10 13 16 19
|
||||
{a..j} => a b c d e f g h i j
|
||||
{j..a} => j i h g f e d c b a
|
||||
{a..z..3} => a d g j m p s v y
|
||||
```
|
||||
|
||||
**Combination**
|
||||
|
||||
Sets and sequences can be mixed together or used along with any other strings.
|
||||
|
||||
```
|
||||
{a,b,c}{1..3} => a1 a2 a3 b1 b2 b3 c1 c2 c3
|
||||
foo/{a,b,c}/bar => foo/a/bar foo/b/bar foo/c/bar
|
||||
```
|
||||
|
||||
The fact that braces can be "expanded" from relatively simple patterns makes them ideal for quickly generating test fixtures, file paths, and similar use cases.
|
||||
|
||||
## Brace matching
|
||||
|
||||
In addition to _expansion_, brace patterns are also useful for performing regular-expression-like matching.
|
||||
|
||||
For example, the pattern `foo/{1..3}/bar` would match any of following strings:
|
||||
|
||||
```
|
||||
foo/1/bar
|
||||
foo/2/bar
|
||||
foo/3/bar
|
||||
```
|
||||
|
||||
But not:
|
||||
|
||||
```
|
||||
baz/1/qux
|
||||
baz/2/qux
|
||||
baz/3/qux
|
||||
```
|
||||
|
||||
Braces can also be combined with [glob patterns](https://github.com/jonschlinkert/micromatch) to perform more advanced wildcard matching. For example, the pattern `*/{1..3}/*` would match any of following strings:
|
||||
|
||||
```
|
||||
foo/1/bar
|
||||
foo/2/bar
|
||||
foo/3/bar
|
||||
baz/1/qux
|
||||
baz/2/qux
|
||||
baz/3/qux
|
||||
```
|
||||
|
||||
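To make the matching side concrete, here is a sketch using [micromatch](https://github.com/jonschlinkert/micromatch) (a separate package, shown purely as an illustration of brace patterns used for matching rather than expansion):

```js
const micromatch = require('micromatch');

console.log(micromatch.isMatch('foo/2/bar', 'foo/{1..3}/bar')); //=> true
console.log(micromatch.isMatch('baz/2/qux', 'foo/{1..3}/bar')); //=> false
console.log(micromatch.isMatch('baz/2/qux', '*/{1..3}/*'));     //=> true
```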
## Brace matching pitfalls
|
||||
|
||||
Although brace patterns offer a user-friendly way of matching ranges or sets of strings, there are also some major disadvantages and potential risks you should be aware of.
|
||||
|
||||
### tldr
|
||||
|
||||
**"brace bombs"**
|
||||
|
||||
- brace expansion can eat up a huge amount of processing resources
|
||||
- as brace patterns increase _linearly in size_, the system resources required to expand the pattern increase exponentially
|
||||
- users can accidentally (or intentionally) exhaust your system's resources, resulting in the equivalent of a DoS attack (bonus: no programming knowledge is required!)
|
||||
|
||||
For a more detailed explanation with examples, see the [geometric complexity](#geometric-complexity) section.
|
||||
|
||||
### The solution
|
||||
|
||||
Jump to the [performance section](#performance) to see how Braces solves this problem in comparison to other libraries.
|
||||
|
||||
### Geometric complexity
|
||||
|
||||
At minimum, brace patterns with sets limited to two elements have quadratic or `O(n^2)` complexity. But the complexity of the algorithm increases exponentially as the number of sets, _and elements per set_, increases, which is `O(n^c)`.
|
||||
|
||||
For example, the following sets demonstrate quadratic (`O(n^2)`) complexity:
|
||||
|
||||
```
|
||||
{1,2}{3,4} => (2X2) => 13 14 23 24
|
||||
{1,2}{3,4}{5,6} => (2X2X2) => 135 136 145 146 235 236 245 246
|
||||
```
|
||||
|
||||
But add an element to a set, and we get an n-fold Cartesian product with `O(n^c)` complexity:
|
||||
|
||||
```
|
||||
{1,2,3}{4,5,6}{7,8,9} => (3X3X3) => 147 148 149 157 158 159 167 168 169 247 248
|
||||
249 257 258 259 267 268 269 347 348 349 357
|
||||
358 359 367 368 369
|
||||
```
|
||||
|
||||
Now, imagine how this complexity grows given that each element is an n-tuple:
|
||||
|
||||
```
|
||||
{1..100}{1..100} => (100X100) => 10,000 elements (38.4 kB)
|
||||
{1..100}{1..100}{1..100} => (100X100X100) => 1,000,000 elements (5.76 MB)
|
||||
```
|
||||
|
||||
Although these examples are clearly contrived, they demonstrate how brace patterns can quickly grow out of control.
|
||||
|
||||
**More information**
|
||||
|
||||
Interested in learning more about brace expansion?
|
||||
|
||||
- [linuxjournal/bash-brace-expansion](http://www.linuxjournal.com/content/bash-brace-expansion)
|
||||
- [rosettacode/Brace_expansion](https://rosettacode.org/wiki/Brace_expansion)
|
||||
- [cartesian product](https://en.wikipedia.org/wiki/Cartesian_product)
|
||||
|
||||
</details>
|
||||
|
||||
## Performance
|
||||
|
||||
Braces is not only screaming fast, it's also more accurate than other brace expansion libraries.
|
||||
|
||||
### Better algorithms
|
||||
|
||||
Fortunately there is a solution to the ["brace bomb" problem](#brace-matching-pitfalls): _don't expand brace patterns into an array when they're used for matching_.
|
||||
|
||||
Instead, convert the pattern into an optimized regular expression. This is easier said than done, and braces is the only library that does this currently.
|
||||
|
||||
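As a simplified illustration of that approach, the compiled output can be dropped straight into a regular expression (the compiled string shown in the comment below is the optimized `{1..1000}` form documented earlier in this readme):

```js
const braces = require('braces');

// Compile instead of expanding: one short regex source instead of 1000 strings.
const [source] = braces('foo/{1..1000}/bar');
//=> 'foo/([1-9]|[1-9][0-9]{1,2}|1000)/bar'

const re = new RegExp(`^${source}$`);
console.log(re.test('foo/500/bar'));  //=> true
console.log(re.test('foo/1001/bar')); //=> false
```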
**The proof is in the numbers**
|
||||
|
||||
Minimatch gets exponentially slower as patterns increase in complexity; braces does not. The following results were generated using `braces()` and `minimatch.braceExpand()`, respectively.
|
||||
|
||||
| **Pattern** | **braces** | **[minimatch][]** |
|
||||
| --------------------------- | ------------------- | ---------------------------- |
|
||||
| `{1..9007199254740991}`[^1] | `298 B` (5ms 459μs) | N/A (freezes) |
|
||||
| `{1..1000000000000000}` | `41 B` (1ms 15μs) | N/A (freezes) |
|
||||
| `{1..100000000000000}` | `40 B` (890μs) | N/A (freezes) |
|
||||
| `{1..10000000000000}` | `39 B` (2ms 49μs) | N/A (freezes) |
|
||||
| `{1..1000000000000}` | `38 B` (608μs) | N/A (freezes) |
|
||||
| `{1..100000000000}` | `37 B` (397μs) | N/A (freezes) |
|
||||
| `{1..10000000000}` | `35 B` (983μs) | N/A (freezes) |
|
||||
| `{1..1000000000}` | `34 B` (798μs) | N/A (freezes) |
|
||||
| `{1..100000000}` | `33 B` (733μs) | N/A (freezes) |
|
||||
| `{1..10000000}` | `32 B` (5ms 632μs) | `78.89 MB` (16s 388ms 569μs) |
|
||||
| `{1..1000000}` | `31 B` (1ms 381μs) | `6.89 MB` (1s 496ms 887μs) |
|
||||
| `{1..100000}` | `30 B` (950μs) | `588.89 kB` (146ms 921μs) |
|
||||
| `{1..10000}` | `29 B` (1ms 114μs) | `48.89 kB` (14ms 187μs) |
|
||||
| `{1..1000}` | `28 B` (760μs) | `3.89 kB` (1ms 453μs) |
|
||||
| `{1..100}` | `22 B` (345μs) | `291 B` (196μs) |
|
||||
| `{1..10}` | `10 B` (533μs) | `20 B` (37μs) |
|
||||
| `{1..3}` | `7 B` (190μs) | `5 B` (27μs) |
|
||||
|
||||
### Faster algorithms
|
||||
|
||||
When you need expansion, braces is still much faster.
|
||||
|
||||
_(the following results were generated using `braces.expand()` and `minimatch.braceExpand()`, respectively)_
|
||||
|
||||
| **Pattern** | **braces** | **[minimatch][]** |
|
||||
| --------------- | --------------------------- | ---------------------------- |
|
||||
| `{1..10000000}` | `78.89 MB` (2s 698ms 642μs) | `78.89 MB` (18s 601ms 974μs) |
|
||||
| `{1..1000000}` | `6.89 MB` (458ms 576μs) | `6.89 MB` (1s 491ms 621μs) |
|
||||
| `{1..100000}` | `588.89 kB` (20ms 728μs) | `588.89 kB` (156ms 919μs) |
|
||||
| `{1..10000}` | `48.89 kB` (2ms 202μs) | `48.89 kB` (13ms 641μs) |
|
||||
| `{1..1000}` | `3.89 kB` (1ms 796μs) | `3.89 kB` (1ms 958μs) |
|
||||
| `{1..100}` | `291 B` (424μs) | `291 B` (211μs) |
|
||||
| `{1..10}` | `20 B` (487μs) | `20 B` (72μs) |
|
||||
| `{1..3}` | `5 B` (166μs) | `5 B` (27μs) |
|
||||
|
||||
If you'd like to run these comparisons yourself, see [test/support/generate.js](test/support/generate.js).
|
||||
|
||||
## Benchmarks
|
||||
|
||||
### Running benchmarks
|
||||
|
||||
Install dev dependencies:
|
||||
|
||||
```bash
|
||||
npm i -d && npm run benchmark
|
||||
```
|
||||
|
||||
### Latest results
|
||||
|
||||
Braces is more accurate, without sacrificing performance.
|
||||
|
||||
```bash
|
||||
● expand - range (expanded)
|
||||
braces x 53,167 ops/sec ±0.12% (102 runs sampled)
|
||||
minimatch x 11,378 ops/sec ±0.10% (102 runs sampled)
|
||||
● expand - range (optimized for regex)
|
||||
braces x 373,442 ops/sec ±0.04% (100 runs sampled)
|
||||
minimatch x 3,262 ops/sec ±0.18% (100 runs sampled)
|
||||
● expand - nested ranges (expanded)
|
||||
braces x 33,921 ops/sec ±0.09% (99 runs sampled)
|
||||
minimatch x 10,855 ops/sec ±0.28% (100 runs sampled)
|
||||
● expand - nested ranges (optimized for regex)
|
||||
braces x 287,479 ops/sec ±0.52% (98 runs sampled)
|
||||
minimatch x 3,219 ops/sec ±0.28% (101 runs sampled)
|
||||
● expand - set (expanded)
|
||||
braces x 238,243 ops/sec ±0.19% (97 runs sampled)
|
||||
minimatch x 538,268 ops/sec ±0.31% (96 runs sampled)
|
||||
● expand - set (optimized for regex)
|
||||
braces x 321,844 ops/sec ±0.10% (97 runs sampled)
|
||||
minimatch x 140,600 ops/sec ±0.15% (100 runs sampled)
|
||||
● expand - nested sets (expanded)
|
||||
braces x 165,371 ops/sec ±0.42% (96 runs sampled)
|
||||
minimatch x 337,720 ops/sec ±0.28% (100 runs sampled)
|
||||
● expand - nested sets (optimized for regex)
|
||||
braces x 242,948 ops/sec ±0.12% (99 runs sampled)
|
||||
minimatch x 87,403 ops/sec ±0.79% (96 runs sampled)
|
||||
```
|
||||
|
||||
## About
|
||||
|
||||
<details>
|
||||
<summary><strong>Contributing</strong></summary>
|
||||
|
||||
Pull requests and stars are always welcome. For bugs and feature requests, [please create an issue](../../issues/new).
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><strong>Running Tests</strong></summary>
|
||||
|
||||
Running and reviewing unit tests is a great way to get familiarized with a library and its API. You can install dependencies and run tests with the following command:
|
||||
|
||||
```sh
|
||||
$ npm install && npm test
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><strong>Building docs</strong></summary>
|
||||
|
||||
_(This project's readme.md is generated by [verb](https://github.com/verbose/verb-generate-readme), please don't edit the readme directly. Any changes to the readme must be made in the [.verb.md](.verb.md) readme template.)_
|
||||
|
||||
To generate the readme, run the following command:
|
||||
|
||||
```sh
|
||||
$ npm install -g verbose/verb#dev verb-generate-readme && verb
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Contributors
|
||||
|
||||
| **Commits** | **Contributor** |
|
||||
| ----------- | ------------------------------------------------------------- |
|
||||
| 197 | [jonschlinkert](https://github.com/jonschlinkert) |
|
||||
| 4 | [doowb](https://github.com/doowb) |
|
||||
| 1 | [es128](https://github.com/es128) |
|
||||
| 1 | [eush77](https://github.com/eush77) |
|
||||
| 1 | [hemanth](https://github.com/hemanth) |
|
||||
| 1 | [wtgtybhertgeghgtwtg](https://github.com/wtgtybhertgeghgtwtg) |
|
||||
|
||||
### Author
|
||||
|
||||
**Jon Schlinkert**
|
||||
|
||||
- [GitHub Profile](https://github.com/jonschlinkert)
|
||||
- [Twitter Profile](https://twitter.com/jonschlinkert)
|
||||
- [LinkedIn Profile](https://linkedin.com/in/jonschlinkert)
|
||||
|
||||
### License
|
||||
|
||||
Copyright © 2019, [Jon Schlinkert](https://github.com/jonschlinkert).
|
||||
Released under the [MIT License](LICENSE).
|
||||
|
||||
---
|
||||
|
||||
_This file was generated by [verb-generate-readme](https://github.com/verbose/verb-generate-readme), v0.8.0, on April 08, 2019._
|
||||
170
lars/node_modules/braces/index.js
generated
vendored
Normal file
@@ -0,0 +1,170 @@
|
||||
'use strict';
|
||||
|
||||
const stringify = require('./lib/stringify');
|
||||
const compile = require('./lib/compile');
|
||||
const expand = require('./lib/expand');
|
||||
const parse = require('./lib/parse');
|
||||
|
||||
/**
|
||||
* Expand the given pattern or create a regex-compatible string.
|
||||
*
|
||||
* ```js
|
||||
* const braces = require('braces');
|
||||
* console.log(braces('{a,b,c}', { compile: true })); //=> ['(a|b|c)']
|
||||
* console.log(braces('{a,b,c}')); //=> ['a', 'b', 'c']
|
||||
* ```
|
||||
* @param {String} `str`
|
||||
* @param {Object} `options`
|
||||
* @return {String}
|
||||
* @api public
|
||||
*/
|
||||
|
||||
const braces = (input, options = {}) => {
|
||||
let output = [];
|
||||
|
||||
if (Array.isArray(input)) {
|
||||
for (const pattern of input) {
|
||||
const result = braces.create(pattern, options);
|
||||
if (Array.isArray(result)) {
|
||||
output.push(...result);
|
||||
} else {
|
||||
output.push(result);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
output = [].concat(braces.create(input, options));
|
||||
}
|
||||
|
||||
if (options && options.expand === true && options.nodupes === true) {
|
||||
output = [...new Set(output)];
|
||||
}
|
||||
return output;
|
||||
};
|
||||
|
||||
/**
|
||||
* Parse the given `str` with the given `options`.
|
||||
*
|
||||
* ```js
|
||||
* // braces.parse(pattern, [, options]);
|
||||
* const ast = braces.parse('a/{b,c}/d');
|
||||
* console.log(ast);
|
||||
* ```
|
||||
* @param {String} pattern Brace pattern to parse
|
||||
* @param {Object} options
|
||||
* @return {Object} Returns an AST
|
||||
* @api public
|
||||
*/
|
||||
|
||||
braces.parse = (input, options = {}) => parse(input, options);
|
||||
|
||||
/**
|
||||
* Creates a braces string from an AST, or an AST node.
|
||||
*
|
||||
* ```js
|
||||
* const braces = require('braces');
|
||||
* let ast = braces.parse('foo/{a,b}/bar');
|
||||
* console.log(stringify(ast.nodes[2])); //=> '{a,b}'
|
||||
* ```
|
||||
* @param {String} `input` Brace pattern or AST.
|
||||
* @param {Object} `options`
|
||||
* @return {Array} Returns an array of expanded values.
|
||||
* @api public
|
||||
*/
|
||||
|
||||
braces.stringify = (input, options = {}) => {
|
||||
if (typeof input === 'string') {
|
||||
return stringify(braces.parse(input, options), options);
|
||||
}
|
||||
return stringify(input, options);
|
||||
};
|
||||
|
||||
/**
|
||||
* Compiles a brace pattern into a regex-compatible, optimized string.
|
||||
* This method is called by the main [braces](#braces) function by default.
|
||||
*
|
||||
* ```js
|
||||
* const braces = require('braces');
|
||||
* console.log(braces.compile('a/{b,c}/d'));
|
||||
* //=> ['a/(b|c)/d']
|
||||
* ```
|
||||
* @param {String} `input` Brace pattern or AST.
|
||||
* @param {Object} `options`
|
||||
* @return {Array} Returns an array of expanded values.
|
||||
* @api public
|
||||
*/
|
||||
|
||||
braces.compile = (input, options = {}) => {
|
||||
if (typeof input === 'string') {
|
||||
input = braces.parse(input, options);
|
||||
}
|
||||
return compile(input, options);
|
||||
};
|
||||
|
||||
/**
|
||||
* Expands a brace pattern into an array. This method is called by the
|
||||
* main [braces](#braces) function when `options.expand` is true. Before
|
||||
 * using this method it's recommended that you read the [performance notes](#performance)
|
||||
 * and the advantages of using [.compile](#compile) instead.
|
||||
*
|
||||
* ```js
|
||||
* const braces = require('braces');
|
||||
* console.log(braces.expand('a/{b,c}/d'));
|
||||
* //=> ['a/b/d', 'a/c/d'];
|
||||
* ```
|
||||
* @param {String} `pattern` Brace pattern
|
||||
* @param {Object} `options`
|
||||
* @return {Array} Returns an array of expanded values.
|
||||
* @api public
|
||||
*/
|
||||
|
||||
braces.expand = (input, options = {}) => {
|
||||
if (typeof input === 'string') {
|
||||
input = braces.parse(input, options);
|
||||
}
|
||||
|
||||
let result = expand(input, options);
|
||||
|
||||
// filter out empty strings if specified
|
||||
if (options.noempty === true) {
|
||||
result = result.filter(Boolean);
|
||||
}
|
||||
|
||||
// filter out duplicates if specified
|
||||
if (options.nodupes === true) {
|
||||
result = [...new Set(result)];
|
||||
}
|
||||
|
||||
return result;
|
||||
};
|
||||
|
||||
/**
|
||||
* Processes a brace pattern and returns either an expanded array
|
||||
 * (if `options.expand` is true) or a highly optimized regex-compatible string.
|
||||
* This method is called by the main [braces](#braces) function.
|
||||
*
|
||||
* ```js
|
||||
* const braces = require('braces');
|
||||
* console.log(braces.create('user-{200..300}/project-{a,b,c}-{1..10}'))
|
||||
* //=> 'user-(20[0-9]|2[1-9][0-9]|300)/project-(a|b|c)-([1-9]|10)'
|
||||
* ```
|
||||
* @param {String} `pattern` Brace pattern
|
||||
* @param {Object} `options`
|
||||
* @return {Array} Returns an array of expanded values.
|
||||
* @api public
|
||||
*/
|
||||
|
||||
braces.create = (input, options = {}) => {
|
||||
if (input === '' || input.length < 3) {
|
||||
return [input];
|
||||
}
|
||||
|
||||
return options.expand !== true
|
||||
? braces.compile(input, options)
|
||||
: braces.expand(input, options);
|
||||
};
|
||||
|
||||
/**
|
||||
* Expose "braces"
|
||||
*/
|
||||
|
||||
module.exports = braces;
|
||||
60
lars/node_modules/braces/lib/compile.js
generated
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
'use strict';
|
||||
|
||||
const fill = require('fill-range');
|
||||
const utils = require('./utils');
|
||||
|
||||
const compile = (ast, options = {}) => {
|
||||
const walk = (node, parent = {}) => {
|
||||
const invalidBlock = utils.isInvalidBrace(parent);
|
||||
const invalidNode = node.invalid === true && options.escapeInvalid === true;
|
||||
const invalid = invalidBlock === true || invalidNode === true;
|
||||
const prefix = options.escapeInvalid === true ? '\\' : '';
|
||||
let output = '';
|
||||
|
||||
if (node.isOpen === true) {
|
||||
return prefix + node.value;
|
||||
}
|
||||
|
||||
if (node.isClose === true) {
|
||||
|
||||
return prefix + node.value;
|
||||
}
|
||||
|
||||
if (node.type === 'open') {
|
||||
return invalid ? prefix + node.value : '(';
|
||||
}
|
||||
|
||||
if (node.type === 'close') {
|
||||
return invalid ? prefix + node.value : ')';
|
||||
}
|
||||
|
||||
if (node.type === 'comma') {
|
||||
return node.prev.type === 'comma' ? '' : invalid ? node.value : '|';
|
||||
}
|
||||
|
||||
if (node.value) {
|
||||
return node.value;
|
||||
}
|
||||
|
||||
if (node.nodes && node.ranges > 0) {
|
||||
const args = utils.reduce(node.nodes);
|
||||
const range = fill(...args, { ...options, wrap: false, toRegex: true, strictZeros: true });
|
||||
|
||||
if (range.length !== 0) {
|
||||
return args.length > 1 && range.length > 1 ? `(${range})` : range;
|
||||
}
|
||||
}
|
||||
|
||||
if (node.nodes) {
|
||||
for (const child of node.nodes) {
|
||||
output += walk(child, node);
|
||||
}
|
||||
}
|
||||
|
||||
return output;
|
||||
};
|
||||
|
||||
return walk(ast);
|
||||
};
|
||||
|
||||
module.exports = compile;
|
||||
57
lars/node_modules/braces/lib/constants.js
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
'use strict';
|
||||
|
||||
module.exports = {
|
||||
MAX_LENGTH: 10000,
|
||||
|
||||
// Digits
|
||||
CHAR_0: '0', /* 0 */
|
||||
CHAR_9: '9', /* 9 */
|
||||
|
||||
// Alphabet chars.
|
||||
CHAR_UPPERCASE_A: 'A', /* A */
|
||||
CHAR_LOWERCASE_A: 'a', /* a */
|
||||
CHAR_UPPERCASE_Z: 'Z', /* Z */
|
||||
CHAR_LOWERCASE_Z: 'z', /* z */
|
||||
|
||||
CHAR_LEFT_PARENTHESES: '(', /* ( */
|
||||
CHAR_RIGHT_PARENTHESES: ')', /* ) */
|
||||
|
||||
CHAR_ASTERISK: '*', /* * */
|
||||
|
||||
// Non-alphabetic chars.
|
||||
CHAR_AMPERSAND: '&', /* & */
|
||||
CHAR_AT: '@', /* @ */
|
||||
CHAR_BACKSLASH: '\\', /* \ */
|
||||
CHAR_BACKTICK: '`', /* ` */
|
||||
CHAR_CARRIAGE_RETURN: '\r', /* \r */
|
||||
CHAR_CIRCUMFLEX_ACCENT: '^', /* ^ */
|
||||
CHAR_COLON: ':', /* : */
|
||||
CHAR_COMMA: ',', /* , */
|
||||
CHAR_DOLLAR: '$', /* $ */
|
||||
CHAR_DOT: '.', /* . */
|
||||
CHAR_DOUBLE_QUOTE: '"', /* " */
|
||||
CHAR_EQUAL: '=', /* = */
|
||||
CHAR_EXCLAMATION_MARK: '!', /* ! */
|
||||
CHAR_FORM_FEED: '\f', /* \f */
|
||||
CHAR_FORWARD_SLASH: '/', /* / */
|
||||
CHAR_HASH: '#', /* # */
|
||||
CHAR_HYPHEN_MINUS: '-', /* - */
|
||||
CHAR_LEFT_ANGLE_BRACKET: '<', /* < */
|
||||
CHAR_LEFT_CURLY_BRACE: '{', /* { */
|
||||
CHAR_LEFT_SQUARE_BRACKET: '[', /* [ */
|
||||
CHAR_LINE_FEED: '\n', /* \n */
|
||||
CHAR_NO_BREAK_SPACE: '\u00A0', /* \u00A0 */
|
||||
CHAR_PERCENT: '%', /* % */
|
||||
CHAR_PLUS: '+', /* + */
|
||||
CHAR_QUESTION_MARK: '?', /* ? */
|
||||
CHAR_RIGHT_ANGLE_BRACKET: '>', /* > */
|
||||
CHAR_RIGHT_CURLY_BRACE: '}', /* } */
|
||||
CHAR_RIGHT_SQUARE_BRACKET: ']', /* ] */
|
||||
CHAR_SEMICOLON: ';', /* ; */
|
||||
CHAR_SINGLE_QUOTE: '\'', /* ' */
|
||||
CHAR_SPACE: ' ', /* */
|
||||
CHAR_TAB: '\t', /* \t */
|
||||
CHAR_UNDERSCORE: '_', /* _ */
|
||||
CHAR_VERTICAL_LINE: '|', /* | */
|
||||
CHAR_ZERO_WIDTH_NOBREAK_SPACE: '\uFEFF' /* \uFEFF */
|
||||
};
|
||||
113
lars/node_modules/braces/lib/expand.js
generated
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
'use strict';
|
||||
|
||||
const fill = require('fill-range');
|
||||
const stringify = require('./stringify');
|
||||
const utils = require('./utils');
|
||||
|
||||
const append = (queue = '', stash = '', enclose = false) => {
|
||||
const result = [];
|
||||
|
||||
queue = [].concat(queue);
|
||||
stash = [].concat(stash);
|
||||
|
||||
if (!stash.length) return queue;
|
||||
if (!queue.length) {
|
||||
return enclose ? utils.flatten(stash).map(ele => `{${ele}}`) : stash;
|
||||
}
|
||||
|
||||
for (const item of queue) {
|
||||
if (Array.isArray(item)) {
|
||||
for (const value of item) {
|
||||
result.push(append(value, stash, enclose));
|
||||
}
|
||||
} else {
|
||||
for (let ele of stash) {
|
||||
if (enclose === true && typeof ele === 'string') ele = `{${ele}}`;
|
||||
result.push(Array.isArray(ele) ? append(item, ele, enclose) : item + ele);
|
||||
}
|
||||
}
|
||||
}
|
||||
return utils.flatten(result);
|
||||
};
|
||||
|
||||
const expand = (ast, options = {}) => {
|
||||
const rangeLimit = options.rangeLimit === undefined ? 1000 : options.rangeLimit;
|
||||
|
||||
const walk = (node, parent = {}) => {
|
||||
node.queue = [];
|
||||
|
||||
let p = parent;
|
||||
let q = parent.queue;
|
||||
|
||||
while (p.type !== 'brace' && p.type !== 'root' && p.parent) {
|
||||
p = p.parent;
|
||||
q = p.queue;
|
||||
}
|
||||
|
||||
if (node.invalid || node.dollar) {
|
||||
q.push(append(q.pop(), stringify(node, options)));
|
||||
return;
|
||||
}
|
||||
|
||||
if (node.type === 'brace' && node.invalid !== true && node.nodes.length === 2) {
|
||||
q.push(append(q.pop(), ['{}']));
|
||||
return;
|
||||
}
|
||||
|
||||
if (node.nodes && node.ranges > 0) {
|
||||
const args = utils.reduce(node.nodes);
|
||||
|
||||
if (utils.exceedsLimit(...args, options.step, rangeLimit)) {
|
||||
throw new RangeError('expanded array length exceeds range limit. Use options.rangeLimit to increase or disable the limit.');
|
||||
}
|
||||
|
||||
let range = fill(...args, options);
|
||||
if (range.length === 0) {
|
||||
range = stringify(node, options);
|
||||
}
|
||||
|
||||
q.push(append(q.pop(), range));
|
||||
node.nodes = [];
|
||||
return;
|
||||
}
|
||||
|
||||
const enclose = utils.encloseBrace(node);
|
||||
let queue = node.queue;
|
||||
let block = node;
|
||||
|
||||
while (block.type !== 'brace' && block.type !== 'root' && block.parent) {
|
||||
block = block.parent;
|
||||
queue = block.queue;
|
||||
}
|
||||
|
||||
for (let i = 0; i < node.nodes.length; i++) {
|
||||
const child = node.nodes[i];
|
||||
|
||||
if (child.type === 'comma' && node.type === 'brace') {
|
||||
if (i === 1) queue.push('');
|
||||
queue.push('');
|
||||
continue;
|
||||
}
|
||||
|
||||
if (child.type === 'close') {
|
||||
q.push(append(q.pop(), queue, enclose));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (child.value && child.type !== 'open') {
|
||||
queue.push(append(queue.pop(), child.value));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (child.nodes) {
|
||||
walk(child, node);
|
||||
}
|
||||
}
|
||||
|
||||
return queue;
|
||||
};
|
||||
|
||||
return utils.flatten(walk(ast));
|
||||
};
|
||||
|
||||
module.exports = expand;
|
||||
331
lars/node_modules/braces/lib/parse.js
generated
vendored
Normal file
@@ -0,0 +1,331 @@
|
||||
'use strict';
|
||||
|
||||
const stringify = require('./stringify');
|
||||
|
||||
/**
|
||||
* Constants
|
||||
*/
|
||||
|
||||
const {
|
||||
MAX_LENGTH,
|
||||
CHAR_BACKSLASH, /* \ */
|
||||
CHAR_BACKTICK, /* ` */
|
||||
CHAR_COMMA, /* , */
|
||||
CHAR_DOT, /* . */
|
||||
CHAR_LEFT_PARENTHESES, /* ( */
|
||||
CHAR_RIGHT_PARENTHESES, /* ) */
|
||||
CHAR_LEFT_CURLY_BRACE, /* { */
|
||||
CHAR_RIGHT_CURLY_BRACE, /* } */
|
||||
CHAR_LEFT_SQUARE_BRACKET, /* [ */
|
||||
CHAR_RIGHT_SQUARE_BRACKET, /* ] */
|
||||
CHAR_DOUBLE_QUOTE, /* " */
|
||||
CHAR_SINGLE_QUOTE, /* ' */
|
||||
CHAR_NO_BREAK_SPACE,
|
||||
CHAR_ZERO_WIDTH_NOBREAK_SPACE
|
||||
} = require('./constants');
|
||||
|
||||
/**
|
||||
* parse
|
||||
*/
|
||||
|
||||
const parse = (input, options = {}) => {
|
||||
if (typeof input !== 'string') {
|
||||
throw new TypeError('Expected a string');
|
||||
}
|
||||
|
||||
const opts = options || {};
|
||||
const max = typeof opts.maxLength === 'number' ? Math.min(MAX_LENGTH, opts.maxLength) : MAX_LENGTH;
|
||||
if (input.length > max) {
|
||||
throw new SyntaxError(`Input length (${input.length}), exceeds max characters (${max})`);
|
||||
}
|
||||
|
||||
const ast = { type: 'root', input, nodes: [] };
|
||||
const stack = [ast];
|
||||
let block = ast;
|
||||
let prev = ast;
|
||||
let brackets = 0;
|
||||
const length = input.length;
|
||||
let index = 0;
|
||||
let depth = 0;
|
||||
let value;
|
||||
|
||||
/**
|
||||
* Helpers
|
||||
*/
|
||||
|
||||
const advance = () => input[index++];
|
||||
const push = node => {
|
||||
if (node.type === 'text' && prev.type === 'dot') {
|
||||
prev.type = 'text';
|
||||
}
|
||||
|
||||
if (prev && prev.type === 'text' && node.type === 'text') {
|
||||
prev.value += node.value;
|
||||
return;
|
||||
}
|
||||
|
||||
block.nodes.push(node);
|
||||
node.parent = block;
|
||||
node.prev = prev;
|
||||
prev = node;
|
||||
return node;
|
||||
};
|
||||
|
||||
push({ type: 'bos' });
|
||||
|
||||
while (index < length) {
|
||||
block = stack[stack.length - 1];
|
||||
value = advance();
|
||||
|
||||
/**
|
||||
* Invalid chars
|
||||
*/
|
||||
|
||||
if (value === CHAR_ZERO_WIDTH_NOBREAK_SPACE || value === CHAR_NO_BREAK_SPACE) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Escaped chars
|
||||
*/
|
||||
|
||||
if (value === CHAR_BACKSLASH) {
|
||||
push({ type: 'text', value: (options.keepEscaping ? value : '') + advance() });
|
||||
continue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Right square bracket (literal): ']'
|
||||
*/
|
||||
|
||||
if (value === CHAR_RIGHT_SQUARE_BRACKET) {
|
||||
push({ type: 'text', value: '\\' + value });
|
||||
continue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Left square bracket: '['
|
||||
*/
|
||||
|
||||
if (value === CHAR_LEFT_SQUARE_BRACKET) {
|
||||
brackets++;
|
||||
|
||||
let next;
|
||||
|
||||
while (index < length && (next = advance())) {
|
||||
value += next;
|
||||
|
||||
if (next === CHAR_LEFT_SQUARE_BRACKET) {
|
||||
brackets++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (next === CHAR_BACKSLASH) {
|
||||
value += advance();
|
||||
continue;
|
||||
}
|
||||
|
||||
if (next === CHAR_RIGHT_SQUARE_BRACKET) {
|
||||
brackets--;
|
||||
|
||||
if (brackets === 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
push({ type: 'text', value });
|
||||
continue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parentheses
|
||||
*/
|
||||
|
||||
if (value === CHAR_LEFT_PARENTHESES) {
|
||||
block = push({ type: 'paren', nodes: [] });
|
||||
stack.push(block);
|
||||
push({ type: 'text', value });
|
||||
continue;
|
||||
}
|
||||
|
||||
if (value === CHAR_RIGHT_PARENTHESES) {
|
||||
if (block.type !== 'paren') {
|
||||
push({ type: 'text', value });
|
||||
continue;
|
||||
}
|
||||
block = stack.pop();
|
||||
push({ type: 'text', value });
|
||||
block = stack[stack.length - 1];
|
||||
continue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Quotes: '|"|`
|
||||
*/
|
||||
|
||||
if (value === CHAR_DOUBLE_QUOTE || value === CHAR_SINGLE_QUOTE || value === CHAR_BACKTICK) {
|
||||
const open = value;
|
||||
let next;
|
||||
|
||||
if (options.keepQuotes !== true) {
|
||||
value = '';
|
||||
}
|
||||
|
||||
while (index < length && (next = advance())) {
|
||||
if (next === CHAR_BACKSLASH) {
|
||||
value += next + advance();
|
||||
continue;
|
||||
}
|
||||
|
||||
if (next === open) {
|
||||
if (options.keepQuotes === true) value += next;
|
||||
break;
|
||||
}
|
||||
|
||||
value += next;
|
||||
}
|
||||
|
||||
push({ type: 'text', value });
|
||||
continue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Left curly brace: '{'
|
||||
*/
|
||||
|
||||
if (value === CHAR_LEFT_CURLY_BRACE) {
|
||||
depth++;
|
||||
|
||||
const dollar = prev.value && prev.value.slice(-1) === '$' || block.dollar === true;
|
||||
const brace = {
|
||||
type: 'brace',
|
||||
open: true,
|
||||
close: false,
|
||||
dollar,
|
||||
depth,
|
||||
commas: 0,
|
||||
ranges: 0,
|
||||
nodes: []
|
||||
};
|
||||
|
||||
block = push(brace);
|
||||
stack.push(block);
|
||||
push({ type: 'open', value });
|
||||
continue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Right curly brace: '}'
|
||||
*/
|
||||
|
||||
if (value === CHAR_RIGHT_CURLY_BRACE) {
|
||||
if (block.type !== 'brace') {
|
||||
push({ type: 'text', value });
|
||||
continue;
|
||||
}
|
||||
|
||||
const type = 'close';
|
||||
block = stack.pop();
|
||||
block.close = true;
|
||||
|
||||
push({ type, value });
|
||||
depth--;
|
||||
|
||||
block = stack[stack.length - 1];
|
||||
continue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Comma: ','
|
||||
*/
|
||||
|
||||
if (value === CHAR_COMMA && depth > 0) {
|
||||
if (block.ranges > 0) {
|
||||
block.ranges = 0;
|
||||
const open = block.nodes.shift();
|
||||
block.nodes = [open, { type: 'text', value: stringify(block) }];
|
||||
}
|
||||
|
||||
push({ type: 'comma', value });
|
||||
block.commas++;
|
||||
continue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Dot: '.'
|
||||
*/
|
||||
|
||||
if (value === CHAR_DOT && depth > 0 && block.commas === 0) {
|
||||
const siblings = block.nodes;
|
||||
|
||||
if (depth === 0 || siblings.length === 0) {
|
||||
push({ type: 'text', value });
|
||||
continue;
|
||||
}
|
||||
|
||||
if (prev.type === 'dot') {
|
||||
block.range = [];
|
||||
prev.value += value;
|
||||
prev.type = 'range';
|
||||
|
||||
if (block.nodes.length !== 3 && block.nodes.length !== 5) {
|
||||
block.invalid = true;
|
||||
block.ranges = 0;
|
||||
prev.type = 'text';
|
||||
continue;
|
||||
}
|
||||
|
||||
block.ranges++;
|
||||
block.args = [];
|
||||
continue;
|
||||
}
|
||||
|
||||
if (prev.type === 'range') {
|
||||
siblings.pop();
|
||||
|
||||
const before = siblings[siblings.length - 1];
|
||||
before.value += prev.value + value;
|
||||
prev = before;
|
||||
block.ranges--;
|
||||
continue;
|
||||
}
|
||||
|
||||
push({ type: 'dot', value });
|
||||
continue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Text
|
||||
*/
|
||||
|
||||
push({ type: 'text', value });
|
||||
}
|
||||
|
||||
// Mark imbalanced braces and brackets as invalid
|
||||
do {
|
||||
block = stack.pop();
|
||||
|
||||
if (block.type !== 'root') {
|
||||
block.nodes.forEach(node => {
|
||||
if (!node.nodes) {
|
||||
if (node.type === 'open') node.isOpen = true;
|
||||
if (node.type === 'close') node.isClose = true;
|
||||
if (!node.nodes) node.type = 'text';
|
||||
node.invalid = true;
|
||||
}
|
||||
});
|
||||
|
||||
// get the location of the block on parent.nodes (block's siblings)
|
||||
const parent = stack[stack.length - 1];
|
||||
const index = parent.nodes.indexOf(block);
|
||||
// replace the (invalid) block with its nodes
|
||||
parent.nodes.splice(index, 1, ...block.nodes);
|
||||
}
|
||||
} while (stack.length > 0);
|
||||
|
||||
push({ type: 'eos' });
|
||||
return ast;
|
||||
};
|
||||
|
||||
module.exports = parse;
|
||||
32
lars/node_modules/braces/lib/stringify.js
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
'use strict';
|
||||
|
||||
const utils = require('./utils');
|
||||
|
||||
module.exports = (ast, options = {}) => {
|
||||
const stringify = (node, parent = {}) => {
|
||||
const invalidBlock = options.escapeInvalid && utils.isInvalidBrace(parent);
|
||||
const invalidNode = node.invalid === true && options.escapeInvalid === true;
|
||||
let output = '';
|
||||
|
||||
if (node.value) {
|
||||
if ((invalidBlock || invalidNode) && utils.isOpenOrClose(node)) {
|
||||
return '\\' + node.value;
|
||||
}
|
||||
return node.value;
|
||||
}
|
||||
|
||||
if (node.value) {
|
||||
return node.value;
|
||||
}
|
||||
|
||||
if (node.nodes) {
|
||||
for (const child of node.nodes) {
|
||||
output += stringify(child);
|
||||
}
|
||||
}
|
||||
return output;
|
||||
};
|
||||
|
||||
return stringify(ast);
|
||||
};
|
||||
|
||||
122
lars/node_modules/braces/lib/utils.js
generated
vendored
Normal file
@@ -0,0 +1,122 @@
'use strict';

exports.isInteger = num => {
  if (typeof num === 'number') {
    return Number.isInteger(num);
  }
  if (typeof num === 'string' && num.trim() !== '') {
    return Number.isInteger(Number(num));
  }
  return false;
};

/**
 * Find a node of the given type
 */

exports.find = (node, type) => node.nodes.find(node => node.type === type);

/**
 * Find a node of the given type
 */

exports.exceedsLimit = (min, max, step = 1, limit) => {
  if (limit === false) return false;
  if (!exports.isInteger(min) || !exports.isInteger(max)) return false;
  return ((Number(max) - Number(min)) / Number(step)) >= limit;
};

/**
 * Escape the given node with '\\' before node.value
 */

exports.escapeNode = (block, n = 0, type) => {
  const node = block.nodes[n];
  if (!node) return;

  if ((type && node.type === type) || node.type === 'open' || node.type === 'close') {
    if (node.escaped !== true) {
      node.value = '\\' + node.value;
      node.escaped = true;
    }
  }
};

/**
 * Returns true if the given brace node should be enclosed in literal braces
 */

exports.encloseBrace = node => {
  if (node.type !== 'brace') return false;
  if ((node.commas >> 0 + node.ranges >> 0) === 0) {
    node.invalid = true;
    return true;
  }
  return false;
};

/**
 * Returns true if a brace node is invalid.
 */

exports.isInvalidBrace = block => {
  if (block.type !== 'brace') return false;
  if (block.invalid === true || block.dollar) return true;
  if ((block.commas >> 0 + block.ranges >> 0) === 0) {
    block.invalid = true;
    return true;
  }
  if (block.open !== true || block.close !== true) {
    block.invalid = true;
    return true;
  }
  return false;
};

/**
 * Returns true if a node is an open or close node
 */

exports.isOpenOrClose = node => {
  if (node.type === 'open' || node.type === 'close') {
    return true;
  }
  return node.open === true || node.close === true;
};

/**
 * Reduce an array of text nodes.
 */

exports.reduce = nodes => nodes.reduce((acc, node) => {
  if (node.type === 'text') acc.push(node.value);
  if (node.type === 'range') node.type = 'text';
  return acc;
}, []);

/**
 * Flatten an array
 */

exports.flatten = (...args) => {
  const result = [];

  const flat = arr => {
    for (let i = 0; i < arr.length; i++) {
      const ele = arr[i];

      if (Array.isArray(ele)) {
        flat(ele);
        continue;
      }

      if (ele !== undefined) {
        result.push(ele);
      }
    }
    return result;
  };

  flat(args);
  return result;
};
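The helpers in `utils.js` are self-contained, and since the package publishes its `lib/` directory (see the `files` field in the package.json below) they can be required directly. A small sketch of the two numeric guards:

```js
// Hedged sketch exercising the helpers shown above.
const utils = require('braces/lib/utils');

console.log(utils.isInteger('42'));   // true  – numeric strings are accepted
console.log(utils.isInteger('4.2'));  // false

// exceedsLimit flags a range whose size, (max - min) / step, reaches the limit;
// callers use it to cap pathological expansions such as {1..1000000}.
console.log(utils.exceedsLimit(1, 10000, 1, 1000)); // true
console.log(utils.exceedsLimit(1, 10, 1, 1000));    // false
```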
lars/node_modules/braces/package.json (generated, vendored, new file, 77 lines)
@@ -0,0 +1,77 @@
{
  "name": "braces",
  "description": "Bash-like brace expansion, implemented in JavaScript. Safer than other brace expansion libs, with complete support for the Bash 4.3 braces specification, without sacrificing speed.",
  "version": "3.0.3",
  "homepage": "https://github.com/micromatch/braces",
  "author": "Jon Schlinkert (https://github.com/jonschlinkert)",
  "contributors": [
    "Brian Woodward (https://twitter.com/doowb)",
    "Elan Shanker (https://github.com/es128)",
    "Eugene Sharygin (https://github.com/eush77)",
    "hemanth.hm (http://h3manth.com)",
    "Jon Schlinkert (http://twitter.com/jonschlinkert)"
  ],
  "repository": "micromatch/braces",
  "bugs": {
    "url": "https://github.com/micromatch/braces/issues"
  },
  "license": "MIT",
  "files": [
    "index.js",
    "lib"
  ],
  "main": "index.js",
  "engines": {
    "node": ">=8"
  },
  "scripts": {
    "test": "mocha",
    "benchmark": "node benchmark"
  },
  "dependencies": {
    "fill-range": "^7.1.1"
  },
  "devDependencies": {
    "ansi-colors": "^3.2.4",
    "bash-path": "^2.0.1",
    "gulp-format-md": "^2.0.0",
    "mocha": "^6.1.1"
  },
  "keywords": [
    "alpha",
    "alphabetical",
    "bash",
    "brace",
    "braces",
    "expand",
    "expansion",
    "filepath",
    "fill",
    "fs",
    "glob",
    "globbing",
    "letter",
    "match",
    "matches",
    "matching",
    "number",
    "numerical",
    "path",
    "range",
    "ranges",
    "sh"
  ],
  "verb": {
    "toc": false,
    "layout": "default",
    "tasks": [
      "readme"
    ],
    "lint": {
      "reflinks": true
    },
    "plugins": [
      "gulp-format-md"
    ]
  }
}
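The package.json shows `fill-range` as the only runtime dependency, which is what backs `{1..3}`-style numeric ranges. A hedged sketch of the public API the package description refers to (compiled glob patterns by default, full expansion via `expand`):

```js
// Hedged sketch of the braces@3 public API as documented upstream.
const braces = require('braces');

// braces() returns optimised glob patterns; expand() returns every literal string.
console.log(braces('a/{b,c}/d'));             // e.g. [ 'a/(b|c)/d' ]
console.log(braces.expand('img-{1..3}.png')); // [ 'img-1.png', 'img-2.png', 'img-3.png' ]
```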
lars/node_modules/detect-libc/.npmignore (generated, vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
.nyc_output
.travis.yml
coverage
test.js
node_modules
/.circleci
/tests/integration
lars/node_modules/detect-libc/LICENSE (generated, vendored, new file, 201 lines)
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
lars/node_modules/detect-libc/README.md (generated, vendored, new file, 78 lines)
@@ -0,0 +1,78 @@
# detect-libc

Node.js module to detect the C standard library (libc) implementation
family and version in use on a given Linux system.

Provides a value suitable for use with the `LIBC` option of
[prebuild](https://www.npmjs.com/package/prebuild),
[prebuild-ci](https://www.npmjs.com/package/prebuild-ci) and
[prebuild-install](https://www.npmjs.com/package/prebuild-install),
therefore allowing build and provision of pre-compiled binaries
for musl-based Linux e.g. Alpine as well as glibc-based.

Currently supports libc detection of `glibc` and `musl`.

## Install

```sh
npm install detect-libc
```

## Usage

### API

```js
const { GLIBC, MUSL, family, version, isNonGlibcLinux } = require('detect-libc');
```

* `GLIBC` is a String containing the value "glibc" for comparison with `family`.
* `MUSL` is a String containing the value "musl" for comparison with `family`.
* `family` is a String representing the system libc family.
* `version` is a String representing the system libc version number.
* `isNonGlibcLinux` is a Boolean representing whether the system is a non-glibc Linux, e.g. Alpine.

### detect-libc command line tool

When run on a Linux system with a non-glibc libc,
the child command will be run with the `LIBC` environment variable
set to the relevant value.

On all other platforms will run the child command as-is.

The command line feature requires `spawnSync` provided by Node v0.12+.

```sh
detect-libc child-command
```

## Integrating with prebuild

```json
"scripts": {
  "install": "detect-libc prebuild-install || node-gyp rebuild",
  "test": "mocha && detect-libc prebuild-ci"
},
"dependencies": {
  "detect-libc": "^1.0.2",
  "prebuild-install": "^2.2.0"
},
"devDependencies": {
  "prebuild": "^6.2.1",
  "prebuild-ci": "^2.2.3"
}
```

## Licence

Copyright 2017 Lovell Fuller

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0.html)

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
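A minimal usage sketch of the API documented above; detection happens once at `require` time, and on non-Linux platforms `family` is simply left empty:

```js
// Hedged sketch of consuming the detect-libc v1 API described in the README.
const libc = require('detect-libc');

if (libc.isNonGlibcLinux) {
  console.log(`non-glibc Linux detected: ${libc.family} ${libc.version}`);
} else if (libc.family === libc.GLIBC) {
  console.log(`glibc ${libc.version}`);
} else {
  console.log('not Linux, or libc family could not be determined');
}
```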
lars/node_modules/detect-libc/bin/detect-libc.js (generated, vendored, new executable file, 18 lines)
@@ -0,0 +1,18 @@
#!/usr/bin/env node

'use strict';

var spawnSync = require('child_process').spawnSync;
var libc = require('../');

var spawnOptions = {
  env: process.env,
  shell: true,
  stdio: 'inherit'
};

if (libc.isNonGlibcLinux) {
  spawnOptions.env.LIBC = process.env.LIBC || libc.family;
}

process.exit(spawnSync(process.argv[2], process.argv.slice(3), spawnOptions).status);
lars/node_modules/detect-libc/lib/detect-libc.js (generated, vendored, new file, 92 lines)
@@ -0,0 +1,92 @@
'use strict';

var platform = require('os').platform();
var spawnSync = require('child_process').spawnSync;
var readdirSync = require('fs').readdirSync;

var GLIBC = 'glibc';
var MUSL = 'musl';

var spawnOptions = {
  encoding: 'utf8',
  env: process.env
};

if (!spawnSync) {
  spawnSync = function () {
    return { status: 126, stdout: '', stderr: '' };
  };
}

function contains (needle) {
  return function (haystack) {
    return haystack.indexOf(needle) !== -1;
  };
}

function versionFromMuslLdd (out) {
  return out.split(/[\r\n]+/)[1].trim().split(/\s/)[1];
}

function safeReaddirSync (path) {
  try {
    return readdirSync(path);
  } catch (e) {}
  return [];
}

var family = '';
var version = '';
var method = '';

if (platform === 'linux') {
  // Try getconf
  var glibc = spawnSync('getconf', ['GNU_LIBC_VERSION'], spawnOptions);
  if (glibc.status === 0) {
    family = GLIBC;
    version = glibc.stdout.trim().split(' ')[1];
    method = 'getconf';
  } else {
    // Try ldd
    var ldd = spawnSync('ldd', ['--version'], spawnOptions);
    if (ldd.status === 0 && ldd.stdout.indexOf(MUSL) !== -1) {
      family = MUSL;
      version = versionFromMuslLdd(ldd.stdout);
      method = 'ldd';
    } else if (ldd.status === 1 && ldd.stderr.indexOf(MUSL) !== -1) {
      family = MUSL;
      version = versionFromMuslLdd(ldd.stderr);
      method = 'ldd';
    } else {
      // Try filesystem (family only)
      var lib = safeReaddirSync('/lib');
      if (lib.some(contains('-linux-gnu'))) {
        family = GLIBC;
        method = 'filesystem';
      } else if (lib.some(contains('libc.musl-'))) {
        family = MUSL;
        method = 'filesystem';
      } else if (lib.some(contains('ld-musl-'))) {
        family = MUSL;
        method = 'filesystem';
      } else {
        var usrSbin = safeReaddirSync('/usr/sbin');
        if (usrSbin.some(contains('glibc'))) {
          family = GLIBC;
          method = 'filesystem';
        }
      }
    }
  }
}

var isNonGlibcLinux = (family !== '' && family !== GLIBC);

module.exports = {
  GLIBC: GLIBC,
  MUSL: MUSL,
  family: family,
  version: version,
  method: method,
  isNonGlibcLinux: isNonGlibcLinux
};
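`lib/detect-libc.js` resolves everything at `require` time, probing `getconf`, then `ldd`, then a few filesystem paths; the exported `method` field (not mentioned in the README) records which probe succeeded. A small sketch that logs the outcome:

```js
// Hedged sketch inspecting the detection result of the vendored module above.
const { family, version, method, isNonGlibcLinux } = require('detect-libc');

// On Alpine this typically reports family 'musl' via the 'ldd' or 'filesystem' probe.
console.log({ family, version, method, isNonGlibcLinux });
```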
Some files were not shown because too many files have changed in this diff.