Merge remote-tracking branch 'origin/master' into ns-remove-telepath-from-core

Conflicts:
	package.json
	src/pane.coffee
This commit is contained in:
Nathan Sobo
2014-01-03 17:46:45 -07:00
57 changed files with 429 additions and 1511 deletions

View File

@@ -5,13 +5,31 @@ os = require 'os'
fm = require 'json-front-matter'
_ = require 'underscore-plus'
packageJson = require './package.json'
packageJson = require '../package.json'
# OAuth token for atom-bot
# TODO Remove once all repositories are public
process.env.ATOM_ACCESS_TOKEN ?= '362295be4c5258d3f7b967bbabae662a455ca2a7'
# Shim harmony collections in case grunt was invoked without harmony
# collections enabled
_.extend(global, require('harmony-collections')) unless global.WeakMap?
module.exports = (grunt) ->
grunt.loadNpmTasks('grunt-coffeelint')
grunt.loadNpmTasks('grunt-lesslint')
grunt.loadNpmTasks('grunt-cson')
grunt.loadNpmTasks('grunt-contrib-csslint')
grunt.loadNpmTasks('grunt-contrib-coffee')
grunt.loadNpmTasks('grunt-contrib-less')
grunt.loadNpmTasks('grunt-markdown')
grunt.loadNpmTasks('grunt-shell')
grunt.loadNpmTasks('grunt-download-atom-shell')
grunt.loadTasks('tasks')
# This allows all subsequent paths to the relative to the root of the repo
grunt.file.setBase(path.resolve('..'))
if not grunt.option('verbose')
grunt.log.writeln = (args...) -> grunt.log
grunt.log.write = (args...) -> grunt.log
@@ -189,21 +207,10 @@ module.exports = (grunt) ->
stderr: false
failOnError: false
grunt.loadNpmTasks('grunt-coffeelint')
grunt.loadNpmTasks('grunt-lesslint')
grunt.loadNpmTasks('grunt-cson')
grunt.loadNpmTasks('grunt-contrib-csslint')
grunt.loadNpmTasks('grunt-contrib-coffee')
grunt.loadNpmTasks('grunt-contrib-less')
grunt.loadNpmTasks('grunt-markdown')
grunt.loadNpmTasks('grunt-download-atom-shell')
grunt.loadNpmTasks('grunt-shell')
grunt.loadTasks('tasks')
grunt.registerTask('compile', ['coffee', 'prebuild-less', 'cson'])
grunt.registerTask('lint', ['coffeelint', 'csslint', 'lesslint'])
grunt.registerTask('test', ['shell:kill-atom', 'run-specs'])
grunt.registerTask('ci', ['download-atom-shell', 'build', 'set-development-version', 'lint', 'test'])
grunt.registerTask('ci', ['download-atom-shell', 'build', 'set-development-version', 'lint', 'test', 'publish-build'])
grunt.registerTask('deploy', ['partial-clean', 'download-atom-shell', 'build', 'codesign'])
grunt.registerTask('docs', ['markdown:guides', 'build-docs'])
grunt.registerTask('default', ['download-atom-shell', 'build', 'set-development-version', 'install'])

10
build/README.md Normal file
View File

@@ -0,0 +1,10 @@
# Atom Build
This folder contains the grunt configuration and tasks to build Atom.
It was moved from the root of the repository so that any native modules used
would be compiled against node's v8 headers since anything stored in
`node_modules` at the root of the repo is compiled against atom's v8 headers.
New build dependencies should be added to the `package.json` file located in
this folder.

33
build/package.json Normal file
View File

@@ -0,0 +1,33 @@
{
"name": "atom-build",
"description": "Atom build",
"repository": {
"type": "git",
"url": "https://github.com/atom/atom.git"
},
"dependencies": {
"biscotto": "0.0.17",
"first-mate": "~0.10.0",
"formidable": "~1.0.14",
"github-releases": "~0.2.0",
"grunt": "~0.4.1",
"grunt-cli": "~0.1.9",
"grunt-coffeelint": "git://github.com/atom/grunt-coffeelint.git",
"grunt-contrib-csslint": "~0.1.2",
"grunt-contrib-coffee": "~0.7.0",
"grunt-contrib-less": "~0.8.0",
"grunt-cson": "0.5.0",
"grunt-download-atom-shell": "git+https://atom-bot:362295be4c5258d3f7b967bbabae662a455ca2a7@github.com/atom/grunt-download-atom-shell#v0.5.0",
"grunt-lesslint": "0.13.0",
"grunt-markdown": "~0.4.0",
"grunt-shell": "~0.3.1",
"harmony-collections": "~0.3.8",
"js-yaml": "~2.1.0",
"json-front-matter": "~0.1.3",
"rcedit": "~0.1.2",
"request": "~2.27.0",
"rimraf": "~2.2.2",
"unzip": "~0.1.9",
"walkdir": "0.0.7"
}
}

View File

@@ -3,7 +3,7 @@ fs = require 'fs'
module.exports = (grunt) ->
cmd = path.join('node_modules', '.bin', 'coffee')
commonArgs = [path.join('node_modules', '.bin', 'biscotto'), '--']
commonArgs = [path.join('build', 'node_modules', '.bin', 'biscotto'), '--']
opts =
stdio: 'inherit'

View File

@@ -1,7 +1,3 @@
#!/usr/bin/env coffee
return if process.env.JANKY_SHA1 and process.env.JANKY_BRANCH isnt 'master'
child_process = require 'child_process'
path = require 'path'
@@ -10,6 +6,7 @@ fs = require 'fs-plus'
GitHub = require 'github-releases'
request = require 'request'
grunt = null
maxReleases = 10
assetName = 'atom-mac.zip'
assetPath = "/tmp/atom-build/#{assetName}"
@@ -19,16 +16,35 @@ defaultHeaders =
Authorization: "token #{token}"
'User-Agent': 'Atom'
module.exports = (gruntObject) ->
grunt = gruntObject
grunt.registerTask 'publish-build', 'Publish the built app', ->
return unless process.platform is 'darwin'
return if process.env.JANKY_SHA1 and process.env.JANKY_BRANCH isnt 'master'
done = @async()
createRelease (error, release) ->
return done(error) if error?
zipApp (error) ->
return done(error) if error?
uploadAsset release, (error) ->
return done(error) if error?
publishRelease(release, done)
logError = (message, error, details) ->
grunt.log.error(message)
grunt.log.error(error.message ? error) if error?
grunt.log.error(details) if details
zipApp = (callback) ->
fs.removeSync(assetPath)
options = {cwd: path.dirname(assetPath), maxBuffer: Infinity}
child_process.exec "zip -r --symlinks #{assetName} Atom.app", options, (error, stdout, stderr) ->
if error?
console.error('Zipping Atom.app failed', error, stderr)
process.exit(1)
else
callback()
logError('Zipping Atom.app failed', error, stderr)
callback(error)
getRelease = (callback) ->
options =
@@ -38,14 +54,14 @@ getRelease = (callback) ->
json: true
request options, (error, response, releases=[]) ->
if error? or response.statusCode isnt 200
console.error('Fetching releases failed', error, releases)
process.exit(1)
logError('Fetching releases failed', error, releases)
callback(error ? new Error(response.statusCode))
else
if releases.length > maxReleases
deleteRelease(release) for release in releases[maxReleases..]
for release in releases when release.name is commitSha
callback(release)
callback(null, release)
return
callback()
@@ -57,7 +73,7 @@ deleteRelease = (release) ->
json: true
request options, (error, response, body='') ->
if error? or response.statusCode isnt 204
console.error('Deleting release failed', error, body)
logError('Deleting release failed', error, body)
deleteExistingAsset = (release, callback) ->
for asset in release.assets when asset.name is assetName
@@ -67,8 +83,8 @@ deleteExistingAsset = (release, callback) ->
headers: defaultHeaders
request options, (error, response, body='') ->
if error? or response.statusCode isnt 204
console.error('Deleting existing release asset failed', error, body)
process.exit(1)
logError('Deleting existing release asset failed', error, body)
callback(error ? new Error(response.statusCode))
else
callback()
@@ -77,10 +93,14 @@ deleteExistingAsset = (release, callback) ->
callback()
createRelease = (callback) ->
getRelease (release) ->
getRelease (error, release) ->
if error?
callback(error)
return
if release?
deleteExistingAsset release, ->
callback(release)
deleteExistingAsset release, (error) ->
callback(error, release)
return
options =
@@ -91,15 +111,15 @@ createRelease = (callback) ->
tag_name: "v#{commitSha}"
target_commitish: 'master'
name: commitSha
body: "Build of [atom@#{commitSha.substring(0, 7)}](https://github.com/atom/atom/commit/#{commitSha})"
body: "Build of [atom@#{commitSha.substring(0, 7)}](https://github.com/atom/atom/commits/#{commitSha})"
draft: true
prerelease: true
request options, (error, response, release={}) ->
if error? or response.statusCode isnt 201
console.error('Creating release failed', error, release)
process.exit(1)
logError('Creating release failed', error, release)
callback(error ? new Error(response.statusCode))
else
callback(release)
callback(null, release)
uploadAsset = (release, callback) ->
options =
@@ -112,14 +132,14 @@ uploadAsset = (release, callback) ->
assetRequest = request options, (error, response, body='') ->
if error? or response.statusCode >= 400
console.error('Upload release asset failed', error, body)
process.exit(1)
logError('Upload release asset failed', error, body)
callback(error ? new Error(response.statusCode))
else
callback(release)
callback(null, release)
fs.createReadStream(assetPath).pipe(assetRequest)
publishRelease = (release) ->
publishRelease = (release, callback) ->
options =
uri: release.url
method: 'POST'
@@ -128,10 +148,7 @@ publishRelease = (release) ->
draft: false
request options, (error, response, body={}) ->
if error? or response.statusCode isnt 200
console.error('Creating release failed', error, body)
process.exit(1)
createRelease (release) ->
zipApp ->
uploadAsset release, ->
publishRelease release
logError('Creating release failed', error, body)
callback(error ? new Error(response.statusCode))
else
callback()

View File

@@ -6,7 +6,7 @@ module.exports = (grunt) ->
shellAppDir = grunt.config.get('atom.shellAppDir')
shellExePath = path.join(shellAppDir, 'atom.exe')
iconPath = path.resolve(__dirname, '..', 'resources', 'win', 'atom.ico')
iconPath = path.resolve('resources', 'win', 'atom.ico')
rcedit = require('rcedit')
rcedit(shellExePath, {'icon': iconPath}, done)

View File

@@ -40,25 +40,18 @@ window, and contains every other view. If you open Atom's inspector with
#### Panes
The `WorkspaceView` contains a `#horizontal` and a `#vertical` axis surrounding
`#panes`. Elements in the horizontal axis will tile across the window
horizontally, appearing to have a vertical orientation. Items in the vertical
axis will tile across the window vertically, appearing to have a horizontal
orientation. You would typically attach tool panels to the root view's primary
axes. Tool panels are elements which take up some screen real estate that isn't
devoted to direct editing. In the example above, the `TreeView` is present in
the `#horizontal` axis to the left of the `#panes`, and the `CommandPanel` is
present in the `#vertical` axis below the `#panes`.
You can attach a tool panel to an axis using the `horizontal` or `vertical`
outlets as follows:
The `WorkspaceView` contains `prependToBottom/Top/Left/Right` and
`appendToBottom/Top/Left/Right` methods, which are used to add Tool Panels. Tool
panels are elements that take up screen real estate not devoted to text editing.
In the example above, the `TreeView` is appended to the left, and the
`CommandPanel` is appended to the top.
```coffeescript
# place a view to the left of the panes (or use .append() to place it to the right)
atom.workspaceView.horizontal.prepend(new MyView)
# place a view to the left of the panes
atom.workspaceView.appendToLeft(new MyView)
# place a view below the panes (or use .prepend() to place it above)
atom.workspaceView.vertical.append(new MyOtherView)
# place a view below the panes
atom.workspaceView.appendToBottom(new MyOtherView)
```
[spacepen]: http://github.com/nathansobo/space-pen

View File

@@ -177,32 +177,33 @@ ul.modified-files-list {
}
```
We'll add one more line to the end of the `magic` method to make this pane appear:
We'll add one more line to the end of the `magic` method to make this pane
appear:
```coffeescript
atom.workspaceView.vertical.append(this)
atom.workspaceView.prependToBottom(this)
```
If you refresh Atom and hit the key command, you'll see a box appear right underneath
the editor:
If you refresh Atom and hit the key command, you'll see a box appear right
underneath the editor:
![Changer_Panel_Append]
As you might have guessed, `atom.workspaceView.vertical.append` tells Atom to append `this`
item (_i.e._, whatever is defined by`@content`) _vertically_ to the editor. If
we had called `atom.workspaceView.horizontal.append`, the pane would be attached to the
right-hand side of the editor.
As you might have guessed, `atom.workspaceView.prependToBottom` tells Atom to
prepend `this` item (_i.e._, whatever is defined by`@content`). If we had called
`atom.workspaceView.appendToBottom`, the pane would be attached below the status
bar.
Before we populate this panel for real, let's apply some logic to toggle the pane
off and on, just like we did with the tree view. Replace the `atom.workspaceView.vertical.append`
call with this code:
Before we populate this panel for real, let's apply some logic to toggle the
pane off and on, just like we did with the tree view. Replace the
`atom.workspaceView.prependToBottom` call with this code:
```coffeescript
# toggles the pane
if @hasParent()
atom.workspaceView.vertical.children().last().remove()
@remove()
else
atom.workspaceView.vertical.append(this)
atom.workspaceView.prependToBottom(this)
```
There are about a hundred different ways to toggle a pane on and off, and this
@@ -261,13 +262,13 @@ appending it to `modifiedFilesList`:
```coffeescript
# toggles the pane
if @hasParent()
atom.workspaceView.vertical.children().last().remove()
@remove()
else
for file in modifiedFiles
stat = fs.lstatSync(file)
mtime = stat.mtime
@modifiedFilesList.append("<li>#{file} - Modified at #{mtime}")
atom.workspaceView.vertical.append(this)
atom.workspaceView.prependToBottom(this)
```
When you toggle the modified files list, your pane is now populated with the
@@ -283,13 +284,13 @@ this demonstration, we'll just clear the `modifiedFilesList` each time it's clos
# toggles the pane
if @hasParent()
@modifiedFilesList.empty() # added this to clear the list on close
atom.workspaceView.vertical.children().last().remove()
@remove()
else
for file in modifiedFiles
stat = fs.lstatSync(file)
mtime = stat.mtime
@modifiedFilesList.append("<li>#{file} - Modified at #{mtime}")
atom.workspaceView.vertical.append(this)
atom.workspaceView.prependToBottom(this)
```
## Coloring UI Elements

View File

@@ -5,7 +5,7 @@
'alt-shift-left': 'editor:select-to-beginning-of-word'
'alt-shift-right': 'editor:select-to-end-of-word'
'home': 'editor:move-to-first-character-of-line'
'end': 'editor:move-to-end-of-line'
'end': 'editor:move-to-end-of-screen-line'
'shift-home': 'editor:select-to-first-character-of-line'
'shift-end': 'editor:select-to-end-of-line'

View File

@@ -26,7 +26,7 @@
'down': 'core:move-down'
'left': 'core:move-left'
'right': 'core:move-right'
'ctrl-alt-cmd-r': 'window:reload'
'ctrl-alt-cmd-l': 'window:reload'
'alt-cmd-i': 'window:toggle-dev-tools'
'cmd-alt-ctrl-p': 'window:run-package-specs'
@@ -94,12 +94,12 @@
'ctrl-A': 'editor:select-to-first-character-of-line'
'ctrl-E': 'editor:select-to-end-of-line'
'cmd-left': 'editor:move-to-first-character-of-line'
'cmd-right': 'editor:move-to-end-of-line'
'cmd-right': 'editor:move-to-end-of-screen-line'
'cmd-shift-left': 'editor:select-to-first-character-of-line'
'cmd-shift-right': 'editor:select-to-end-of-line'
'alt-backspace': 'editor:backspace-to-beginning-of-word'
'alt-delete': 'editor:delete-to-end-of-word'
'ctrl-a': 'editor:move-to-first-character-of-line'
'ctrl-a': 'editor:move-to-beginning-of-line'
'ctrl-e': 'editor:move-to-end-of-line'
'ctrl-k': 'editor:cut-to-end-of-line'

View File

@@ -25,12 +25,15 @@
"coffeestack": "0.6.0",
"diff": "git://github.com/benogle/jsdiff.git",
"emissary": "0.19.0",
"first-mate": "0.5.0",
"first-mate": "0.11.0",
"fs-plus": "0.13.0",
"fstream": "0.1.24",
"fuzzaldrin": "0.1.0",
"git-utils": "0.29.0",
"guid": "0.0.10",
"jasmine-focused": "~0.15.0",
"jasmine-node": "git://github.com/kevinsawicki/jasmine-node.git#short-stacks",
"jasmine-tagged": "0.2.0",
"mkdirp": "0.3.5",
"keytar": "0.13.0",
"less-cache": "0.10.0",
@@ -50,31 +53,6 @@
"underscore-plus": "0.6.1",
"reactionary": "~0.1.0"
},
"devDependencies": {
"biscotto": "0.0.17",
"formidable": "~1.0.14",
"fstream": "0.1.24",
"grunt": "~0.4.1",
"grunt-cli": "~0.1.9",
"grunt-coffeelint": "git://github.com/atom/grunt-coffeelint.git",
"grunt-lesslint": "0.13.0",
"grunt-cson": "0.5.0",
"grunt-contrib-csslint": "~0.1.2",
"grunt-contrib-coffee": "~0.7.0",
"grunt-contrib-less": "~0.8.0",
"walkdir": "0.0.7",
"js-yaml": "~2.1.0",
"grunt-markdown": "~0.4.0",
"json-front-matter": "~0.1.3",
"grunt-shell": "~0.3.1",
"jasmine-node": "git://github.com/kevinsawicki/jasmine-node.git#short-stacks",
"jasmine-tagged": "0.2.0",
"request": "~2.27.0",
"unzip": "~0.1.9",
"rcedit": "~0.1.2",
"rimraf": "~2.2.2",
"github-releases": "~0.2.0"
},
"packageDependencies": {
"atom-dark-syntax": "0.10.0",
"atom-dark-ui": "0.18.0",
@@ -95,8 +73,8 @@
"dev-live-reload": "0.20.0",
"editor-stats": "0.9.0",
"exception-reporting": "0.9.0",
"feedback": "0.20.0",
"find-and-replace": "0.63.0",
"feedback": "0.21.0",
"find-and-replace": "0.65.0",
"fuzzy-finder": "0.28.0",
"gists": "0.13.0",
"git-diff": "0.21.0",
@@ -112,7 +90,7 @@
"release-notes": "0.15.0",
"settings-view": "0.52.0",
"snippets": "0.17.0",
"spell-check": "0.17.0",
"spell-check": "0.18.0",
"status-bar": "0.27.0",
"styleguide": "0.19.0",
"symbols-view": "0.27.0",
@@ -157,8 +135,7 @@
"language-todo": "0.2.0",
"language-toml": "0.7.0",
"language-xml": "0.2.0",
"language-yaml": "0.1.0",
"grunt-download-atom-shell": "0.4.0"
"language-yaml": "0.1.0"
},
"private": true,
"scripts": {

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env node
#!/usr/bin/env node --harmony_collections
var safeExec = require('./utils/child-process-wrapper.js').safeExec;
var fs = require('fs');
var path = require('path');
@@ -35,6 +35,7 @@ var echoNewLine = process.platform == 'win32' ? 'echo.' : 'echo';
var commands = [
'git submodule --quiet sync',
'git submodule --quiet update --recursive --init',
{command: 'npm install --quiet', options: {cwd: path.resolve(__dirname, '..', 'build'), ignoreStdout: true}},
{command: 'npm install --quiet', options: {cwd: apmVendorPath, ignoreStdout: true}},
{command: 'npm install --quiet ' + apmVendorPath, options: {cwd: apmInstallPath, ignoreStdout: true}},
{command: 'npm install --quiet ' + apmVendorPath, options: {ignoreStdout: true}},

View File

@@ -1,11 +1,13 @@
#!/usr/bin/env node
#!/usr/bin/env node --harmony_collections
var cp = require('./utils/child-process-wrapper.js');
var path = require('path');
process.chdir(path.dirname(__dirname));
cp.safeExec('node script/bootstrap', function() {
// node_modules/.bin/grunt "$@"
var gruntPath = path.join('node_modules', '.bin', 'grunt') + (process.platform === 'win32' ? '.cmd' : '');
cp.safeSpawn(gruntPath, process.argv.slice(2), process.exit);
// build/node_modules/.bin/grunt "$@"
var gruntPath = path.join('build', 'node_modules', '.bin', 'grunt') + (process.platform === 'win32' ? '.cmd' : '');
var args = ['--gruntfile', path.resolve('build', 'Gruntfile.coffee')];
args = args.concat(process.argv.slice(2));
cp.safeSpawn(gruntPath, args, process.exit);
});

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env node
#!/usr/bin/env node --harmony_collections
var cp = require('./utils/child-process-wrapper.js');
var fs = require('fs');
var path = require('path');
@@ -28,16 +28,13 @@ readEnvironmentVariables();
cp.safeExec.bind(global, 'node script/bootstrap', function(error) {
if (error)
process.exit(1);
require('fs-plus').removeSync.bind(global, path.join(homeDir, '.atom'))
var async = require('async');
var gruntPath = path.join('node_modules', '.bin', 'grunt') + (process.platform === 'win32' ? '.cmd' : '');
var gruntPath = path.join('build', 'node_modules', '.bin', 'grunt') + (process.platform === 'win32' ? '.cmd' : '');
var tasks = [
require('rimraf').bind(global, path.join(homeDir, '.atom')),
cp.safeExec.bind(global, 'git clean -dff'),
cp.safeExec.bind(global, gruntPath + ' ci --stack --no-color'),
cp.safeExec.bind(global, gruntPath + ' ci --gruntfile build/Gruntfile.coffee --stack --no-color'),
]
if (process.platform === 'darwin') {
tasks.push(cp.safeExec.bind(global, 'node_modules/.bin/coffee script/upload-release'))
}
async.series(tasks, function(error) {
process.exit(error ? 1 : 0);
});

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env node
#!/usr/bin/env node --harmony_collections
var cp = require('./utils/child-process-wrapper.js');
var path = require('path');
var os = require('os');
@@ -16,6 +16,7 @@ var killatom = process.platform === 'win32' ? 'START taskkill /F /IM ' + product
var commands = [
killatom,
[__dirname, '..', 'node_modules'],
[__dirname, '..', 'build', 'node_modules'],
[__dirname, '..', 'atom-shell'],
[home, '.atom', '.node-gyp'],
[home, '.atom', 'storage'],

View File

@@ -9,7 +9,7 @@ cd "$(dirname "$0")/../.."
rm -fr node_modules
rm -fr vendor/apm/node_modules
./script/bootstrap --no-color
./node_modules/.bin/grunt --no-color --build-dir="$BUILT_PRODUCTS_DIR" deploy
./build/node_modules/.bin/grunt --no-color --build-dir="$BUILT_PRODUCTS_DIR" deploy
echo "TARGET_BUILD_DIR=$BUILT_PRODUCTS_DIR"
echo "FULL_PRODUCT_NAME=Atom.app"

9
script/grunt Executable file
View File

@@ -0,0 +1,9 @@
#!/usr/bin/env node --harmony_collections
var cp = require('./utils/child-process-wrapper.js');
var path = require('path');
// node build/node_modules/grunt-cli/bin/grunt "$@"
var gruntPath = path.resolve(__dirname, '..', 'build', 'node_modules', 'grunt-cli', 'bin', 'grunt') + (process.platform === 'win32' ? '.cmd' : '');
var args = [gruntPath, '--gruntfile', path.resolve('build', 'Gruntfile.coffee')];
args = args.concat(process.argv.slice(2));
cp.safeSpawn(process.execPath, args, process.exit);

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env node
#!/usr/bin/env node --harmony_collections
var safeExec = require('./utils/child-process-wrapper.js').safeExec;
var path = require('path');

View File

@@ -358,13 +358,13 @@ describe "Editor", ->
expect(editor.getCursors().length).toBe 1
expect(editor.getCursorBufferPosition()).toEqual [12,2]
describe ".moveCursorToBeginningOfLine()", ->
describe ".moveCursorToBeginningOfScreenLine()", ->
describe "when soft wrap is on", ->
it "moves cursor to the beginning of the screen line", ->
editor.setSoftWrap(true)
editor.setEditorWidthInChars(10)
editor.setCursorScreenPosition([1, 2])
editor.moveCursorToBeginningOfLine()
editor.moveCursorToBeginningOfScreenLine()
cursor = editor.getCursor()
expect(cursor.getScreenPosition()).toEqual [1, 0]
@@ -372,19 +372,19 @@ describe "Editor", ->
it "moves cursor to the beginning of then line", ->
editor.setCursorScreenPosition [0,5]
editor.addCursorAtScreenPosition [1,7]
editor.moveCursorToBeginningOfLine()
editor.moveCursorToBeginningOfScreenLine()
expect(editor.getCursors().length).toBe 2
[cursor1, cursor2] = editor.getCursors()
expect(cursor1.getBufferPosition()).toEqual [0,0]
expect(cursor2.getBufferPosition()).toEqual [1,0]
describe ".moveCursorToEndOfLine()", ->
describe ".moveCursorToEndOfScreenLine()", ->
describe "when soft wrap is on", ->
it "moves cursor to the beginning of the screen line", ->
editor.setSoftWrap(true)
editor.setEditorWidthInChars(10)
editor.setCursorScreenPosition([1, 2])
editor.moveCursorToEndOfLine()
editor.moveCursorToEndOfScreenLine()
cursor = editor.getCursor()
expect(cursor.getScreenPosition()).toEqual [1, 9]
@@ -392,12 +392,30 @@ describe "Editor", ->
it "moves cursor to the end of line", ->
editor.setCursorScreenPosition [0,0]
editor.addCursorAtScreenPosition [1,0]
editor.moveCursorToEndOfLine()
editor.moveCursorToEndOfScreenLine()
expect(editor.getCursors().length).toBe 2
[cursor1, cursor2] = editor.getCursors()
expect(cursor1.getBufferPosition()).toEqual [0,29]
expect(cursor2.getBufferPosition()).toEqual [1,30]
describe ".moveCursorToBeginningOfLine()", ->
it "moves cursor to the beginning of the buffer line", ->
editor.setSoftWrap(true)
editor.setEditorWidthInChars(10)
editor.setCursorScreenPosition([1, 2])
editor.moveCursorToBeginningOfLine()
cursor = editor.getCursor()
expect(cursor.getScreenPosition()).toEqual [0, 0]
describe ".moveCursorToEndOfLine()", ->
it "moves cursor to the end of the buffer line", ->
editor.setSoftWrap(true)
editor.setEditorWidthInChars(10)
editor.setCursorScreenPosition([0, 2])
editor.moveCursorToEndOfLine()
cursor = editor.getCursor()
expect(cursor.getScreenPosition()).toEqual [3, 4]
describe ".moveCursorToFirstCharacterOfLine()", ->
describe "when soft wrap is on", ->
it "moves to the first character of the current screen line or the beginning of the screen line if it's already on the first character", ->
@@ -429,6 +447,13 @@ describe "Editor", ->
expect(cursor1.getBufferPosition()).toEqual [0,0]
expect(cursor2.getBufferPosition()).toEqual [1,0]
it "moves to the beginning of the line if it only contains whitespace ", ->
editor.setText("first\n \nthird")
editor.setCursorScreenPosition [1,2]
editor.moveCursorToFirstCharacterOfLine()
cursor = editor.getCursor()
expect(cursor.getBufferPosition()).toEqual [1,0]
describe ".moveCursorToBeginningOfWord()", ->
it "moves the cursor to the beginning of the word", ->
editor.setCursorBufferPosition [0, 8]
@@ -2670,3 +2695,55 @@ describe "Editor", ->
expect(editor.getCursorBufferPosition()).toEqual [0, 2]
editor.moveCursorLeft()
expect(editor.getCursorBufferPosition()).toEqual [0, 0]
describe "when the editor's grammar has an injection selector", ->
beforeEach ->
atom.packages.activatePackage('language-text', sync: true)
atom.packages.activatePackage('language-javascript', sync: true)
it "includes the grammar's patterns when the selector matches the current scope in other grammars", ->
atom.packages.activatePackage('language-hyperlink', sync: true)
grammar = atom.syntax.selectGrammar("text.js")
{tokens} = grammar.tokenizeLine("var i; // http://github.com")
expect(tokens[0].value).toBe "var"
expect(tokens[0].scopes).toEqual ["source.js", "storage.modifier.js"]
expect(tokens[6].value).toBe "http://github.com"
expect(tokens[6].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
describe "when the grammar is added", ->
it "retokenizes existing buffers that contain tokens that match the injection selector", ->
editor = atom.project.openSync('sample.js')
editor.setText("// http://github.com")
{tokens} = editor.lineForScreenRow(0)
expect(tokens[1].value).toBe " http://github.com"
expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
atom.packages.activatePackage('language-hyperlink', sync: true)
{tokens} = editor.lineForScreenRow(0)
expect(tokens[2].value).toBe "http://github.com"
expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
describe "when the grammar is updated", ->
it "retokenizes existing buffers that contain tokens that match the injection selector", ->
editor = atom.project.openSync('sample.js')
editor.setText("// SELECT * FROM OCTOCATS")
{tokens} = editor.lineForScreenRow(0)
expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS"
expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
atom.packages.activatePackage('package-with-injection-selector', sync: true)
{tokens} = editor.lineForScreenRow(0)
expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS"
expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
atom.packages.activatePackage('language-sql', sync: true)
{tokens} = editor.lineForScreenRow(0)
expect(tokens[2].value).toBe "SELECT"
expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "keyword.other.DML.sql"]

View File

@@ -123,7 +123,7 @@ describe "EditorView", ->
describe ".remove()", ->
it "destroys the edit session", ->
editorView.remove()
expect(editorView.editor.destroyed).toBeTruthy()
expect(editorView.editor.isDestroyed()).toBe true
describe ".edit(editor)", ->
[newEditor, newBuffer] = []

View File

@@ -0,0 +1,4 @@
'name': 'test'
'scopeName': 'source.test'
'injectionSelector': 'comment'
'patterns': [{'include': 'source.sql'}]

View File

@@ -1,5 +1,6 @@
fs = require 'fs-plus'
path = require 'path'
temp = require 'temp'
Keymap = require '../src/keymap'
{$, $$, WorkspaceView} = require 'atom'
@@ -7,9 +8,11 @@ describe "Keymap", ->
fragment = null
keymap = null
resourcePath = atom.getLoadSettings().resourcePath
configDirPath = null
beforeEach ->
keymap = new Keymap({configDirPath: atom.getConfigDirPath(), resourcePath})
configDirPath = temp.mkdirSync('atom')
keymap = new Keymap({configDirPath, resourcePath})
fragment = $ """
<div class="command-mode">
<div class="child-node">
@@ -18,6 +21,9 @@ describe "Keymap", ->
</div>
"""
afterEach ->
keymap.destroy()
describe ".handleKeyEvent(event)", ->
deleteCharHandler = null
insertCharHandler = null
@@ -347,3 +353,19 @@ describe "Keymap", ->
el = $$ -> @div class: 'brown'
bindings = keymap.keyBindingsForCommandMatchingElement('cultivate', el)
expect(bindings).toHaveLength 0
describe "when the user keymap file is changed", ->
it "is reloaded", ->
keymapFilePath = path.join(configDirPath, "keymap.cson")
fs.writeFileSync(keymapFilePath, '"body": "ctrl-l": "core:move-left"')
keymap.loadUserKeymap()
spyOn(keymap, 'loadUserKeymap').andCallThrough()
fs.writeFileSync(keymapFilePath, "'body': 'ctrl-l': 'core:move-right'")
waitsFor ->
keymap.loadUserKeymap.callCount > 0
runs ->
keyBinding = keymap.keyBindingsForKeystroke('ctrl-l')[0]
expect(keyBinding.command).toBe 'core:move-right'

View File

@@ -143,7 +143,7 @@ describe "Pane", ->
it "removes the item and tries to call destroy on it", ->
pane.destroyItem(editor2)
expect(pane.getItems().indexOf(editor2)).toBe -1
expect(editor2.destroyed).toBeTruthy()
expect(editor2.isDestroyed()).toBe true
describe "if the item is modified", ->
beforeEach ->
@@ -162,7 +162,7 @@ describe "Pane", ->
expect(editor2.save).toHaveBeenCalled()
expect(pane.getItems().indexOf(editor2)).toBe -1
expect(editor2.destroyed).toBeTruthy()
expect(editor2.isDestroyed()).toBe true
describe "when the item has no uri", ->
it "presents a save-as dialog, then saves the item with the given uri before removing and destroying it", ->
@@ -176,7 +176,7 @@ describe "Pane", ->
expect(editor2.saveAs).toHaveBeenCalledWith("/selected/path")
expect(pane.getItems().indexOf(editor2)).toBe -1
expect(editor2.destroyed).toBeTruthy()
expect(editor2.isDestroyed()).toBe true
describe "if the [Don't Save] option is selected", ->
it "removes and destroys the item without saving it", ->
@@ -185,7 +185,7 @@ describe "Pane", ->
expect(editor2.save).not.toHaveBeenCalled()
expect(pane.getItems().indexOf(editor2)).toBe -1
expect(editor2.destroyed).toBeTruthy()
expect(editor2.isDestroyed()).toBe true
describe "if the [Cancel] option is selected", ->
it "does not save, remove, or destroy the item", ->
@@ -194,7 +194,7 @@ describe "Pane", ->
expect(editor2.save).not.toHaveBeenCalled()
expect(pane.getItems().indexOf(editor2)).not.toBe -1
expect(editor2.destroyed).toBeFalsy()
expect(editor2.isDestroyed()).toBe false
describe "::removeItem(item)", ->
it "removes the item from the items list and shows the next item if it was showing", ->
@@ -290,7 +290,7 @@ describe "Pane", ->
expect(pane.hasParent()).toBeFalsy()
expect(pane2.getItems()).toEqual [view3, editor1]
expect(editor1.destroyed).toBeFalsy()
expect(editor1.isDestroyed()).toBe false
describe "when the item is a jQuery object", ->
it "preserves data by detaching instead of removing", ->
@@ -304,14 +304,14 @@ describe "Pane", ->
pane.showItem(editor1)
pane.trigger 'pane:close'
expect(pane.hasParent()).toBeFalsy()
expect(editor2.destroyed).toBeTruthy()
expect(editor1.destroyed).toBeTruthy()
expect(editor2.isDestroyed()).toBe true
expect(editor1.isDestroyed()).toBe true
describe "pane:close-other-items", ->
it "destroys all items except the current", ->
pane.showItem(editor1)
pane.trigger 'pane:close-other-items'
expect(editor2.destroyed).toBeTruthy()
expect(editor2.isDestroyed()).toBe true
expect(pane.getItems()).toEqual [editor1]
describe "::saveActiveItem()", ->
@@ -423,8 +423,8 @@ describe "Pane", ->
describe "::remove()", ->
it "destroys all the pane's items", ->
pane.remove()
expect(editor1.destroyed).toBeTruthy()
expect(editor2.destroyed).toBeTruthy()
expect(editor1.isDestroyed()).toBe true
expect(editor2.isDestroyed()).toBe true
it "triggers a 'pane:removed' event with the pane", ->
removedHandler = jasmine.createSpy("removedHandler")

View File

@@ -399,6 +399,7 @@ describe "Project", ->
range: [[2, 6], [2, 11]]
it "works on evil filenames", ->
platform.generateEvilFiles()
atom.project.setPath(path.join(__dirname, 'fixtures', 'evil-files'))
paths = []
matches = []

View File

@@ -1,8 +1,6 @@
path = require 'path'
fs = require 'fs-plus'
{_} = require 'atom'
## Platform specific helpers
module.exports =
# Public: Returns true if being run from within Windows
@@ -18,20 +16,20 @@ module.exports =
fs.removeSync(evilFilesPath) if fs.existsSync(evilFilesPath)
fs.mkdirSync(evilFilesPath)
if (@isWindows())
if @isWindows()
filenames = [
"a_file_with_utf8.txt",
"file with spaces.txt",
"a_file_with_utf8.txt"
"file with spaces.txt"
"utfa\u0306.md"
]
else
filenames = [
"a_file_with_utf8.txt",
"file with spaces.txt",
"goddam\nnewlines",
"quote\".txt",
"a_file_with_utf8.txt"
"file with spaces.txt"
"goddam\nnewlines"
"quote\".txt"
"utfa\u0306.md"
]
for filename in filenames
fd = fs.writeFileSync(path.join(evilFilesPath, filename), 'evil file!', flag: 'w')
fs.writeFileSync(path.join(evilFilesPath, filename), 'evil file!', flag: 'w')

View File

@@ -13,11 +13,8 @@ Editor = require '../src/editor'
EditorView = require '../src/editor-view'
TokenizedBuffer = require '../src/tokenized-buffer'
pathwatcher = require 'pathwatcher'
platform = require './spec-helper-platform'
clipboard = require 'clipboard'
platform.generateEvilFiles()
atom.themes.loadBaseStylesheets()
atom.themes.requireStylesheet '../static/jasmine'

View File

@@ -1,7 +1,6 @@
{fs} = require 'atom'
path = require 'path'
temp = require 'temp'
TextMateGrammar = require '../src/text-mate-grammar'
describe "the `syntax` global", ->
beforeEach ->
@@ -62,20 +61,23 @@ describe "the `syntax` global", ->
describe "when multiple grammars have matching fileTypes", ->
it "selects the grammar with the longest fileType match", ->
grammar1 = new TextMateGrammar
grammarPath1 = temp.path(suffix: '.json')
fs.writeFileSync grammarPath1, JSON.stringify(
name: 'test1'
scopeName: 'source1'
fileTypes: ['test', 'more.test']
fileTypes: ['test']
)
grammar1 = atom.syntax.loadGrammarSync(grammarPath1)
expect(atom.syntax.selectGrammar('more.test', '')).toBe grammar1
grammar2 = new TextMateGrammar
grammarPath2 = temp.path(suffix: '.json')
fs.writeFileSync grammarPath2, JSON.stringify(
name: 'test2'
scopeName: 'source2'
fileTypes: ['test']
atom.syntax.addGrammar(grammar1)
atom.syntax.addGrammar(grammar2)
expect(atom.syntax.selectGrammar('more.test', '')).toBe grammar1
fileTypes: ['test', 'more.test']
)
grammar2 = atom.syntax.loadGrammarSync(grammarPath2)
expect(atom.syntax.selectGrammar('more.test', '')).toBe grammar2
describe "when there is no file path", ->
it "does not throw an exception (regression)", ->

View File

@@ -1,704 +0,0 @@
TextMateGrammar = require '../src/text-mate-grammar'
TextMatePackage = require '../src/text-mate-package'
{_, fs} = require 'atom'
describe "TextMateGrammar", ->
grammar = null
beforeEach ->
atom.packages.activatePackage('language-text', sync: true)
atom.packages.activatePackage('language-javascript', sync: true)
atom.packages.activatePackage('language-coffee-script', sync: true)
atom.packages.activatePackage('language-ruby', sync: true)
atom.packages.activatePackage('language-html', sync: true)
atom.packages.activatePackage('language-php', sync: true)
atom.packages.activatePackage('language-python', sync: true)
grammar = atom.syntax.selectGrammar("hello.coffee")
describe "@loadSync(path)", ->
it "loads grammars from plists", ->
grammar = TextMateGrammar.loadSync(require.resolve('./fixtures/sample.plist'))
expect(grammar.scopeName).toBe "text.plain"
{tokens} = grammar.tokenizeLine("this text is so plain. i love it.")
expect(tokens[0]).toEqual value: "this text is so plain. i love it.", scopes: ["text.plain", "meta.paragraph.text"]
it "loads grammars from cson files", ->
grammar = TextMateGrammar.loadSync(require.resolve('./fixtures/packages/package-with-grammars/grammars/alot.cson'))
expect(grammar.scopeName).toBe "source.alot"
{tokens} = grammar.tokenizeLine("this is alot of code")
expect(tokens[1]).toEqual value: "alot", scopes: ["source.alot", "keyword.alot"]
describe ".tokenizeLine(line, ruleStack)", ->
describe "when the entire line matches a single pattern with no capture groups", ->
it "returns a single token with the correct scope", ->
{tokens} = grammar.tokenizeLine("return")
expect(tokens.length).toBe 1
[token] = tokens
expect(token.scopes).toEqual ['source.coffee', 'keyword.control.coffee']
describe "when the entire line matches a single pattern with capture groups", ->
it "returns a single token with the correct scope", ->
{tokens} = grammar.tokenizeLine("new foo.bar.Baz")
expect(tokens.length).toBe 3
[newOperator, whitespace, className] = tokens
expect(newOperator).toEqual value: 'new', scopes: ['source.coffee', 'meta.class.instance.constructor', 'keyword.operator.new.coffee']
expect(whitespace).toEqual value: ' ', scopes: ['source.coffee', 'meta.class.instance.constructor']
expect(className).toEqual value: 'foo.bar.Baz', scopes: ['source.coffee', 'meta.class.instance.constructor', 'entity.name.type.instance.coffee']
describe "when the line doesn't match any patterns", ->
it "returns the entire line as a single simple token with the grammar's scope", ->
textGrammar = atom.syntax.selectGrammar('foo.txt')
{tokens} = textGrammar.tokenizeLine("abc def")
expect(tokens.length).toBe 1
describe "when the line matches multiple patterns", ->
it "returns multiple tokens, filling in regions that don't match patterns with tokens in the grammar's global scope", ->
{tokens} = grammar.tokenizeLine(" return new foo.bar.Baz ")
expect(tokens.length).toBe 7
expect(tokens[0]).toEqual value: ' ', scopes: ['source.coffee']
expect(tokens[1]).toEqual value: 'return', scopes: ['source.coffee', 'keyword.control.coffee']
expect(tokens[2]).toEqual value: ' ', scopes: ['source.coffee']
expect(tokens[3]).toEqual value: 'new', scopes: ['source.coffee', 'meta.class.instance.constructor', 'keyword.operator.new.coffee']
expect(tokens[4]).toEqual value: ' ', scopes: ['source.coffee', 'meta.class.instance.constructor']
expect(tokens[5]).toEqual value: 'foo.bar.Baz', scopes: ['source.coffee', 'meta.class.instance.constructor', 'entity.name.type.instance.coffee']
expect(tokens[6]).toEqual value: ' ', scopes: ['source.coffee']
describe "when the line matches a pattern with optional capture groups", ->
it "only returns tokens for capture groups that matched", ->
{tokens} = grammar.tokenizeLine("class Quicksort")
expect(tokens.length).toBe 3
expect(tokens[0].value).toBe "class"
expect(tokens[1].value).toBe " "
expect(tokens[2].value).toBe "Quicksort"
describe "when the line matches a rule with nested capture groups and lookahead capture groups beyond the scope of the overall match", ->
it "creates distinct tokens for nested captures and does not return tokens beyond the scope of the overall capture", ->
{tokens} = grammar.tokenizeLine(" destroy: ->")
expect(tokens.length).toBe 6
expect(tokens[0]).toEqual(value: ' ', scopes: ["source.coffee"])
expect(tokens[1]).toEqual(value: 'destro', scopes: ["source.coffee", "meta.function.coffee", "entity.name.function.coffee"])
# this dangling 'y' with a duplicated scope looks wrong, but textmate yields the same behavior. probably a quirk in the coffee grammar.
expect(tokens[2]).toEqual(value: 'y', scopes: ["source.coffee", "meta.function.coffee", "entity.name.function.coffee", "entity.name.function.coffee"])
expect(tokens[3]).toEqual(value: ':', scopes: ["source.coffee", "keyword.operator.coffee"])
expect(tokens[4]).toEqual(value: ' ', scopes: ["source.coffee"])
expect(tokens[5]).toEqual(value: '->', scopes: ["source.coffee", "storage.type.function.coffee"])
describe "when the line matches a pattern that includes a rule", ->
it "returns tokens based on the included rule", ->
{tokens} = grammar.tokenizeLine("7777777")
expect(tokens.length).toBe 1
expect(tokens[0]).toEqual value: '7777777', scopes: ['source.coffee', 'constant.numeric.coffee']
describe "when the line is an interpolated string", ->
it "returns the correct tokens", ->
{tokens} = grammar.tokenizeLine('"the value is #{@x} my friend"')
expect(tokens[0]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","punctuation.definition.string.begin.coffee"]
expect(tokens[1]).toEqual value: "the value is ", scopes: ["source.coffee","string.quoted.double.coffee"]
expect(tokens[2]).toEqual value: '#{', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"]
expect(tokens[3]).toEqual value: "@x", scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","variable.other.readwrite.instance.coffee"]
expect(tokens[4]).toEqual value: "}", scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"]
expect(tokens[5]).toEqual value: " my friend", scopes: ["source.coffee","string.quoted.double.coffee"]
expect(tokens[6]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","punctuation.definition.string.end.coffee"]
describe "when the line has an interpolated string inside an interpolated string", ->
it "returns the correct tokens", ->
{tokens} = grammar.tokenizeLine('"#{"#{@x}"}"')
expect(tokens[0]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","punctuation.definition.string.begin.coffee"]
expect(tokens[1]).toEqual value: '#{', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"]
expect(tokens[2]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","punctuation.definition.string.begin.coffee"]
expect(tokens[3]).toEqual value: '#{', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"]
expect(tokens[4]).toEqual value: '@x', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","source.coffee.embedded.source","variable.other.readwrite.instance.coffee"]
expect(tokens[5]).toEqual value: '}', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"]
expect(tokens[6]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","string.quoted.double.coffee","punctuation.definition.string.end.coffee"]
expect(tokens[7]).toEqual value: '}', scopes: ["source.coffee","string.quoted.double.coffee","source.coffee.embedded.source","punctuation.section.embedded.coffee"]
expect(tokens[8]).toEqual value: '"', scopes: ["source.coffee","string.quoted.double.coffee","punctuation.definition.string.end.coffee"]
describe "when the line is empty", ->
it "returns a single token which has the global scope", ->
{tokens} = grammar.tokenizeLine('')
expect(tokens[0]).toEqual value: '', scopes: ["source.coffee"]
describe "when the line matches no patterns", ->
it "does not infinitely loop", ->
grammar = atom.syntax.selectGrammar("sample.txt")
{tokens} = grammar.tokenizeLine('hoo')
expect(tokens.length).toBe 1
expect(tokens[0]).toEqual value: 'hoo', scopes: ["text.plain", "meta.paragraph.text"]
describe "when the line matches a pattern with a 'contentName'", ->
it "creates tokens using the content of contentName as the token name", ->
grammar = atom.syntax.selectGrammar("sample.txt")
{tokens} = grammar.tokenizeLine('ok, cool')
expect(tokens[0]).toEqual value: 'ok, cool', scopes: ["text.plain", "meta.paragraph.text"]
describe "when the line matches a pattern with no `name` or `contentName`", ->
it "creates tokens without adding a new scope", ->
grammar = atom.syntax.selectGrammar('foo.rb')
{tokens} = grammar.tokenizeLine('%w|oh \\look|')
expect(tokens.length).toBe 5
expect(tokens[0]).toEqual value: '%w|', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby", "punctuation.definition.string.begin.ruby"]
expect(tokens[1]).toEqual value: 'oh ', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby"]
expect(tokens[2]).toEqual value: '\\l', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby"]
expect(tokens[3]).toEqual value: 'ook', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby"]
describe "when the line matches a begin/end pattern", ->
it "returns tokens based on the beginCaptures, endCaptures and the child scope", ->
{tokens} = grammar.tokenizeLine("'''single-quoted heredoc'''")
expect(tokens.length).toBe 3
expect(tokens[0]).toEqual value: "'''", scopes: ['source.coffee', 'string.quoted.heredoc.coffee', 'punctuation.definition.string.begin.coffee']
expect(tokens[1]).toEqual value: "single-quoted heredoc", scopes: ['source.coffee', 'string.quoted.heredoc.coffee']
expect(tokens[2]).toEqual value: "'''", scopes: ['source.coffee', 'string.quoted.heredoc.coffee', 'punctuation.definition.string.end.coffee']
describe "when the pattern spans multiple lines", ->
it "uses the ruleStack returned by the first line to parse the second line", ->
{tokens: firstTokens, ruleStack} = grammar.tokenizeLine("'''single-quoted")
{tokens: secondTokens, ruleStack} = grammar.tokenizeLine("heredoc'''", ruleStack)
expect(firstTokens.length).toBe 2
expect(secondTokens.length).toBe 2
expect(firstTokens[0]).toEqual value: "'''", scopes: ['source.coffee', 'string.quoted.heredoc.coffee', 'punctuation.definition.string.begin.coffee']
expect(firstTokens[1]).toEqual value: "single-quoted", scopes: ['source.coffee', 'string.quoted.heredoc.coffee']
expect(secondTokens[0]).toEqual value: "heredoc", scopes: ['source.coffee', 'string.quoted.heredoc.coffee']
expect(secondTokens[1]).toEqual value: "'''", scopes: ['source.coffee', 'string.quoted.heredoc.coffee', 'punctuation.definition.string.end.coffee']
describe "when the pattern contains sub-patterns", ->
it "returns tokens within the begin/end scope based on the sub-patterns", ->
{tokens} = grammar.tokenizeLine('"""heredoc with character escape \\t"""')
expect(tokens.length).toBe 4
expect(tokens[0]).toEqual value: '"""', scopes: ['source.coffee', 'string.quoted.double.heredoc.coffee', 'punctuation.definition.string.begin.coffee']
expect(tokens[1]).toEqual value: "heredoc with character escape ", scopes: ['source.coffee', 'string.quoted.double.heredoc.coffee']
expect(tokens[2]).toEqual value: "\\t", scopes: ['source.coffee', 'string.quoted.double.heredoc.coffee', 'constant.character.escape.coffee']
expect(tokens[3]).toEqual value: '"""', scopes: ['source.coffee', 'string.quoted.double.heredoc.coffee', 'punctuation.definition.string.end.coffee']
describe "when the end pattern contains a back reference", ->
it "constructs the end rule based on its back-references to captures in the begin rule", ->
grammar = atom.syntax.selectGrammar('foo.rb')
{tokens} = grammar.tokenizeLine('%w|oh|,')
expect(tokens.length).toBe 4
expect(tokens[0]).toEqual value: '%w|', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby", "punctuation.definition.string.begin.ruby"]
expect(tokens[1]).toEqual value: 'oh', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby"]
expect(tokens[2]).toEqual value: '|', scopes: ["source.ruby", "string.quoted.other.literal.lower.ruby", "punctuation.definition.string.end.ruby"]
expect(tokens[3]).toEqual value: ',', scopes: ["source.ruby", "punctuation.separator.object.ruby"]
it "allows the rule containing that end pattern to be pushed to the stack multiple times", ->
grammar = atom.syntax.selectGrammar('foo.rb')
{tokens} = grammar.tokenizeLine('%Q+matz had some #{%Q-crazy ideas-} for ruby syntax+ # damn.')
expect(tokens[0]).toEqual value: '%Q+', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","punctuation.definition.string.begin.ruby"]
expect(tokens[1]).toEqual value: 'matz had some ', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby"]
expect(tokens[2]).toEqual value: '#{', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","punctuation.section.embedded.begin.ruby"]
expect(tokens[3]).toEqual value: '%Q-', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","string.quoted.other.literal.upper.ruby","punctuation.definition.string.begin.ruby"]
expect(tokens[4]).toEqual value: 'crazy ideas', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","string.quoted.other.literal.upper.ruby"]
expect(tokens[5]).toEqual value: '-', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","string.quoted.other.literal.upper.ruby","punctuation.definition.string.end.ruby"]
expect(tokens[6]).toEqual value: '}', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","meta.embedded.line.ruby","punctuation.section.embedded.end.ruby", "source.ruby"]
expect(tokens[7]).toEqual value: ' for ruby syntax', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby"]
expect(tokens[8]).toEqual value: '+', scopes: ["source.ruby","string.quoted.other.literal.upper.ruby","punctuation.definition.string.end.ruby"]
expect(tokens[9]).toEqual value: ' ', scopes: ["source.ruby"]
expect(tokens[10]).toEqual value: '#', scopes: ["source.ruby","comment.line.number-sign.ruby","punctuation.definition.comment.ruby"]
expect(tokens[11]).toEqual value: ' damn.', scopes: ["source.ruby","comment.line.number-sign.ruby"]
describe "when the pattern includes rules from another grammar", ->
describe "when a grammar matching the desired scope is available", ->
it "parses tokens inside the begin/end patterns based on the included grammar's rules", ->
atom.packages.activatePackage('language-html', sync: true)
atom.packages.activatePackage('language-ruby-on-rails', sync: true)
grammar = atom.syntax.grammarForScopeName('text.html.ruby')
{tokens} = grammar.tokenizeLine("<div class='name'><%= User.find(2).full_name %></div>")
expect(tokens[0]).toEqual value: '<', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.begin.html"]
expect(tokens[1]).toEqual value: 'div', scopes: ["text.html.ruby","meta.tag.block.any.html","entity.name.tag.block.any.html"]
expect(tokens[2]).toEqual value: ' ', scopes: ["text.html.ruby","meta.tag.block.any.html"]
expect(tokens[3]).toEqual value: 'class', scopes: ["text.html.ruby","meta.tag.block.any.html", "entity.other.attribute-name.html"]
expect(tokens[4]).toEqual value: '=', scopes: ["text.html.ruby","meta.tag.block.any.html"]
expect(tokens[5]).toEqual value: '\'', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html","punctuation.definition.string.begin.html"]
expect(tokens[6]).toEqual value: 'name', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html"]
expect(tokens[7]).toEqual value: '\'', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html","punctuation.definition.string.end.html"]
expect(tokens[8]).toEqual value: '>', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.end.html"]
expect(tokens[9]).toEqual value: '<%=', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.embedded.ruby"]
expect(tokens[10]).toEqual value: ' ', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"]
expect(tokens[11]).toEqual value: 'User', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","support.class.ruby"]
expect(tokens[12]).toEqual value: '.', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.separator.method.ruby"]
expect(tokens[13]).toEqual value: 'find', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"]
expect(tokens[14]).toEqual value: '(', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.function.ruby"]
expect(tokens[15]).toEqual value: '2', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","constant.numeric.ruby"]
expect(tokens[16]).toEqual value: ')', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.function.ruby"]
expect(tokens[17]).toEqual value: '.', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.separator.method.ruby"]
expect(tokens[18]).toEqual value: 'full_name ', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"]
expect(tokens[19]).toEqual value: '%>', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.embedded.ruby"]
expect(tokens[20]).toEqual value: '</', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.begin.html"]
expect(tokens[21]).toEqual value: 'div', scopes: ["text.html.ruby","meta.tag.block.any.html","entity.name.tag.block.any.html"]
expect(tokens[22]).toEqual value: '>', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.end.html"]
it "updates the grammar if the included grammar is updated later", ->
atom.packages.activatePackage('language-html', sync: true)
atom.packages.activatePackage('language-ruby-on-rails', sync: true)
grammar = atom.syntax.selectGrammar('foo.html.erb')
grammarUpdatedHandler = jasmine.createSpy("grammarUpdatedHandler")
grammar.on 'grammar-updated', grammarUpdatedHandler
{tokens} = grammar.tokenizeLine("<div class='name'><% <<-SQL select * from users;")
expect(tokens[12].value).toBe " select * from users;"
atom.packages.activatePackage('language-sql', sync: true)
expect(grammarUpdatedHandler).toHaveBeenCalled()
{tokens} = grammar.tokenizeLine("<div class='name'><% <<-SQL select * from users;")
expect(tokens[12].value).toBe " "
expect(tokens[13].value).toBe "select"
describe "when a grammar matching the desired scope is unavailable", ->
it "updates the grammar if a matching grammar is added later", ->
atom.packages.deactivatePackage('language-html')
atom.packages.activatePackage('language-ruby-on-rails', sync: true)
grammar = atom.syntax.grammarForScopeName('text.html.ruby')
{tokens} = grammar.tokenizeLine("<div class='name'><%= User.find(2).full_name %></div>")
expect(tokens[0]).toEqual value: "<div class='name'>", scopes: ["text.html.ruby"]
expect(tokens[1]).toEqual value: '<%=', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.embedded.ruby"]
expect(tokens[2]).toEqual value: ' ', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"]
expect(tokens[3]).toEqual value: 'User', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","support.class.ruby"]
atom.packages.activatePackage('language-html', sync: true)
{tokens} = grammar.tokenizeLine("<div class='name'><%= User.find(2).full_name %></div>")
expect(tokens[0]).toEqual value: '<', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.begin.html"]
expect(tokens[1]).toEqual value: 'div', scopes: ["text.html.ruby","meta.tag.block.any.html","entity.name.tag.block.any.html"]
expect(tokens[2]).toEqual value: ' ', scopes: ["text.html.ruby","meta.tag.block.any.html"]
expect(tokens[3]).toEqual value: 'class', scopes: ["text.html.ruby","meta.tag.block.any.html", "entity.other.attribute-name.html"]
expect(tokens[4]).toEqual value: '=', scopes: ["text.html.ruby","meta.tag.block.any.html"]
expect(tokens[5]).toEqual value: '\'', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html","punctuation.definition.string.begin.html"]
expect(tokens[6]).toEqual value: 'name', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html"]
expect(tokens[7]).toEqual value: '\'', scopes: ["text.html.ruby","meta.tag.block.any.html","string.quoted.single.html","punctuation.definition.string.end.html"]
expect(tokens[8]).toEqual value: '>', scopes: ["text.html.ruby","meta.tag.block.any.html","punctuation.definition.tag.end.html"]
expect(tokens[9]).toEqual value: '<%=', scopes: ["text.html.ruby","source.ruby.rails.embedded.html","punctuation.section.embedded.ruby"]
expect(tokens[10]).toEqual value: ' ', scopes: ["text.html.ruby","source.ruby.rails.embedded.html"]
it "can parse a grammar with newline characters in its regular expressions (regression)", ->
grammar = new TextMateGrammar
name: "test"
scopeName: "source.imaginaryLanguage"
repository: {}
patterns: [
{
name: "comment-body"
begin: "//"
end: "\\n"
beginCaptures:
"0": { name: "comment-start" }
}
]
{tokens, ruleStack} = grammar.tokenizeLine("// a singleLineComment")
expect(ruleStack.length).toBe 1
expect(ruleStack[0].scopeName).toBe "source.imaginaryLanguage"
expect(tokens.length).toBe 2
expect(tokens[0].value).toBe "//"
expect(tokens[1].value).toBe " a singleLineComment"
it "does not loop infinitely (regression)", ->
grammar = atom.syntax.selectGrammar("hello.js")
{tokens, ruleStack} = grammar.tokenizeLine("// line comment")
{tokens, ruleStack} = grammar.tokenizeLine(" // second line comment with a single leading space", ruleStack)
describe "when inside a C block", ->
beforeEach ->
atom.packages.activatePackage('language-c', sync: true)
it "correctly parses a method. (regression)", ->
grammar = atom.syntax.selectGrammar("hello.c")
{tokens, ruleStack} = grammar.tokenizeLine("if(1){m()}")
expect(tokens[5]).toEqual value: "m", scopes: ["source.c", "meta.block.c", "meta.function-call.c", "support.function.any-method.c"]
it "correctly parses nested blocks. (regression)", ->
grammar = atom.syntax.selectGrammar("hello.c")
{tokens, ruleStack} = grammar.tokenizeLine("if(1){if(1){m()}}")
expect(tokens[5]).toEqual value: "if", scopes: ["source.c", "meta.block.c", "keyword.control.c"]
expect(tokens[10]).toEqual value: "m", scopes: ["source.c", "meta.block.c", "meta.block.c", "meta.function-call.c", "support.function.any-method.c"]
describe "when the grammar can infinitely loop over a line", ->
it "aborts tokenization", ->
spyOn(console, 'error')
atom.packages.activatePackage("package-with-infinite-loop-grammar")
grammar = atom.syntax.selectGrammar("something.package-with-infinite-loop-grammar")
{tokens} = grammar.tokenizeLine("abc")
expect(tokens[0].value).toBe "a"
expect(tokens[1].value).toBe "bc"
expect(console.error).toHaveBeenCalled()
describe "when a grammar has a pattern that has back references in the match value", ->
it "does not special handle the back references and instead allows oniguruma to resolve them", ->
atom.packages.activatePackage('language-sass', sync: true)
grammar = atom.syntax.selectGrammar("style.scss")
{tokens} = grammar.tokenizeLine("@mixin x() { -moz-selector: whatever; }")
expect(tokens[9]).toEqual value: "-moz-selector", scopes: ["source.css.scss", "meta.property-list.scss", "meta.property-name.scss"]
describe "when a line has more tokens than `maxTokensPerLine`", ->
it "creates a final token with the remaining text and resets the ruleStack to match the begining of the line", ->
grammar = atom.syntax.selectGrammar("hello.js")
spyOn(grammar, 'getMaxTokensPerLine').andCallFake -> 5
originalRuleStack = [grammar.initialRule, grammar.initialRule, grammar.initialRule]
{tokens, ruleStack} = grammar.tokenizeLine("one(two(three(four(five(_param_)))))", originalRuleStack)
expect(tokens.length).toBe 5
expect(tokens[4].value).toBe "three(four(five(_param_)))))"
expect(ruleStack).toEqual originalRuleStack
describe "when a grammar has a capture with patterns", ->
it "matches the patterns and includes the scope specified as the pattern's match name", ->
grammar = atom.syntax.selectGrammar("hello.php")
{tokens} = grammar.tokenizeLine("<?php public final function meth() {} ?>")
expect(tokens[2].value).toBe "public"
expect(tokens[2].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php", "storage.modifier.php"]
expect(tokens[3].value).toBe " "
expect(tokens[3].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php"]
expect(tokens[4].value).toBe "final"
expect(tokens[4].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php", "storage.modifier.php"]
expect(tokens[5].value).toBe " "
expect(tokens[5].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php"]
expect(tokens[6].value).toBe "function"
expect(tokens[6].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php", "storage.type.function.php"]
it "ignores child captures of a capture with patterns", ->
grammar = new TextMateGrammar
name: "test"
scopeName: "source"
repository: {}
patterns: [
{
name: "text"
match: "(a(b))"
captures:
"1":
patterns: [
{
match: "ab"
name: "a"
}
]
"2":
name: "b"
}
]
{tokens} = grammar.tokenizeLine("ab")
expect(tokens[0].value).toBe "ab"
expect(tokens[0].scopes).toEqual ["source", "text", "a"]
describe "when the grammar has injections", ->
it "correctly includes the injected patterns when tokenizing", ->
grammar = atom.syntax.selectGrammar("hello.php")
{tokens} = grammar.tokenizeLine("<div><?php function hello() {} ?></div>")
expect(tokens[3].value).toBe "<?php"
expect(tokens[3].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "punctuation.section.embedded.begin.php"]
expect(tokens[5].value).toBe "function"
expect(tokens[5].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php", "storage.type.function.php"]
expect(tokens[7].value).toBe "hello"
expect(tokens[7].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "meta.function.php", "entity.name.function.php"]
expect(tokens[14].value).toBe "?"
expect(tokens[14].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "source.php", "punctuation.section.embedded.end.php", "source.php"]
expect(tokens[15].value).toBe ">"
expect(tokens[15].scopes).toEqual ["text.html.php", "meta.embedded.line.php", "punctuation.section.embedded.end.php"]
expect(tokens[16].value).toBe "</"
expect(tokens[16].scopes).toEqual ["text.html.php", "meta.tag.block.any.html", "punctuation.definition.tag.begin.html"]
expect(tokens[17].value).toBe "div"
expect(tokens[17].scopes).toEqual ["text.html.php", "meta.tag.block.any.html", "entity.name.tag.block.any.html"]
describe "when the grammar's pattern name has a group number in it", ->
it "replaces the group number with the matched captured text", ->
atom.packages.activatePackage('language-hyperlink', sync: true)
grammar = atom.syntax.grammarForScopeName("text.hyperlink")
{tokens} = grammar.tokenizeLine("https://github.com")
expect(tokens[0].scopes).toEqual ["text.hyperlink", "markup.underline.link.https.hyperlink"]
describe "when the grammar has an injection selector", ->
it "includes the grammar's patterns when the selector matches the current scope in other grammars", ->
atom.packages.activatePackage('language-hyperlink', sync: true)
grammar = atom.syntax.selectGrammar("text.js")
{tokens} = grammar.tokenizeLine("var i; // http://github.com")
expect(tokens[0].value).toBe "var"
expect(tokens[0].scopes).toEqual ["source.js", "storage.modifier.js"]
expect(tokens[6].value).toBe "http://github.com"
expect(tokens[6].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
describe "when the grammar is added", ->
it "retokenizes existing buffers that contain tokens that match the injection selector", ->
editor = atom.project.openSync('sample.js')
editor.setText("// http://github.com")
{tokens} = editor.lineForScreenRow(0)
expect(tokens[1].value).toBe " http://github.com"
expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
atom.packages.activatePackage('language-hyperlink', sync: true)
{tokens} = editor.lineForScreenRow(0)
expect(tokens[2].value).toBe "http://github.com"
expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "markup.underline.link.http.hyperlink"]
describe "when the grammar is updated", ->
it "retokenizes existing buffers that contain tokens that match the injection selector", ->
editor = atom.project.openSync('sample.js')
editor.setText("// SELECT * FROM OCTOCATS")
{tokens} = editor.lineForScreenRow(0)
expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS"
expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
atom.syntax.addGrammar(new TextMateGrammar(
name: "test"
scopeName: "source.test"
repository: {}
injectionSelector: "comment"
patterns: [ { include: "source.sql" } ]
))
{tokens} = editor.lineForScreenRow(0)
expect(tokens[1].value).toBe " SELECT * FROM OCTOCATS"
expect(tokens[1].scopes).toEqual ["source.js", "comment.line.double-slash.js"]
atom.packages.activatePackage('language-sql', sync: true)
{tokens} = editor.lineForScreenRow(0)
expect(tokens[2].value).toBe "SELECT"
expect(tokens[2].scopes).toEqual ["source.js", "comment.line.double-slash.js", "keyword.other.DML.sql"]
describe "when the position doesn't advance and rule includes $self and matches itself", ->
it "tokenizes the entire line using the rule", ->
grammar = new TextMateGrammar
name: "test"
scopeName: "source"
repository: {}
patterns: [
{
name: "text"
begin: "(?=forever)"
end: "whatevs"
patterns: [
include: "$self"
]
}
]
{tokens} = grammar.tokenizeLine("forever and ever")
expect(tokens.length).toBe 1
expect(tokens[0].value).toBe "forever and ever"
expect(tokens[0].scopes).toEqual ["source", "text"]
describe "${capture:/command} style pattern names", ->
lines = null
beforeEach ->
atom.packages.activatePackage('language-todo', sync: true)
grammar = atom.syntax.selectGrammar('main.rb')
lines = grammar.tokenizeLines "# TODO be nicer"
it "replaces the number with the capture group and translates the text", ->
tokens = lines[0]
expect(tokens[2].value).toEqual "TODO"
expect(tokens[2].scopes).toEqual ["source.ruby", "comment.line.number-sign.ruby", "storage.type.class.todo"]
# End-to-end tokenization specs against real bundled grammars. Each nested
# describe activates the relevant language package(s), selects a grammar by
# file name, tokenizes a small snippet, and asserts exact token values/scopes.
# Most of these pin previously-fixed bugs (infinite loops, long lines,
# injected grammars, surrogate pairs).
# NOTE(review): leading indentation was lost in extraction — nesting inferred
# from describe/it structure; confirm against the original spec file.
describe "language-specific integration tests", ->
lines = null
describe "Git commit messages", ->
beforeEach ->
atom.packages.activatePackage('language-git', sync: true)
grammar = atom.syntax.selectGrammar('COMMIT_EDITMSG')
lines = grammar.tokenizeLines """
longggggggggggggggggggggggggggggggggggggggggggggggg
# Please enter the commit message for your changes. Lines starting
"""
it "correctly parses a long line", ->
tokens = lines[0]
# A subject line over the git length limit gets the deprecated/too-long scope.
expect(tokens[0].value).toBe "longggggggggggggggggggggggggggggggggggggggggggggggg"
expect(tokens[0].scopes).toEqual ["text.git-commit", "meta.scope.message.git-commit", "invalid.deprecated.line-too-long.git-commit"]
it "correctly parses the number sign of the first comment line", ->
tokens = lines[1]
# The "#" is its own punctuation token, nested under the comment scope.
expect(tokens[0].value).toBe "#"
expect(tokens[0].scopes).toEqual ["text.git-commit", "meta.scope.metadata.git-commit", "comment.line.number-sign.git-commit", "punctuation.definition.comment.git-commit"]
describe "C++", ->
beforeEach ->
# .cc selects the C++ grammar (scope source.c++) from language-c.
atom.packages.activatePackage('language-c', sync: true)
grammar = atom.syntax.selectGrammar('includes.cc')
lines = grammar.tokenizeLines """
#include "a.h"
#include "b.h"
"""
it "correctly parses the first include line", ->
tokens = lines[0]
expect(tokens[0].value).toBe "#"
expect(tokens[0].scopes).toEqual ["source.c++", "meta.preprocessor.c.include"]
expect(tokens[1].value).toBe 'include'
expect(tokens[1].scopes).toEqual ["source.c++", "meta.preprocessor.c.include", "keyword.control.import.include.c"]
it "correctly parses the second include line", ->
# Second line must tokenize identically — guards against rule-stack state
# leaking from line 1 into line 2.
tokens = lines[1]
expect(tokens[0].value).toBe "#"
expect(tokens[0].scopes).toEqual ["source.c++", "meta.preprocessor.c.include"]
expect(tokens[1].value).toBe 'include'
expect(tokens[1].scopes).toEqual ["source.c++", "meta.preprocessor.c.include", "keyword.control.import.include.c"]
describe "Ruby", ->
beforeEach ->
grammar = atom.syntax.selectGrammar('hello.rb')
lines = grammar.tokenizeLines """
a = {
"b" => "c",
}
"""
it "doesn't loop infinitely (regression)", ->
# Only checks that tokenization terminates and reassembles each line verbatim.
expect(_.pluck(lines[0], 'value').join('')).toBe 'a = {'
expect(_.pluck(lines[1], 'value').join('')).toBe ' "b" => "c",'
expect(_.pluck(lines[2], 'value').join('')).toBe '}'
expect(_.pluck(lines[3], 'value').join('')).toBe ''
describe "Objective-C", ->
beforeEach ->
# Objective-C++ (.mm) pulls rules from both the C and Objective-C packages.
atom.packages.activatePackage('language-c', sync: true)
atom.packages.activatePackage('language-objective-c', sync: true)
grammar = atom.syntax.selectGrammar('function.mm')
lines = grammar.tokenizeLines """
void test() {
NSString *a = @"a\\nb";
}
"""
it "correctly parses variable type when it is a built-in Cocoa class", ->
tokens = lines[1]
expect(tokens[0].value).toBe "NSString"
expect(tokens[0].scopes).toEqual ["source.objc++", "meta.function.c", "meta.block.c", "support.class.cocoa"]
it "correctly parses the semicolon at the end of the line", ->
tokens = lines[1]
lastToken = _.last(tokens)
expect(lastToken.value).toBe ";"
expect(lastToken.scopes).toEqual ["source.objc++", "meta.function.c", "meta.block.c"]
it "correctly parses the string characters before the escaped character", ->
tokens = lines[1]
# @" opens an Objective-C string literal; the escape (\\n) must not break
# the token boundary before it.
expect(tokens[2].value).toBe '@"'
expect(tokens[2].scopes).toEqual ["source.objc++", "meta.function.c", "meta.block.c", "string.quoted.double.objc", "punctuation.definition.string.begin.objc"]
describe "Java", ->
beforeEach ->
atom.packages.activatePackage('language-java', sync: true)
grammar = atom.syntax.selectGrammar('Function.java')
it "correctly parses single line comments", ->
lines = grammar.tokenizeLines """
public void test() {
//comment
}
"""
tokens = lines[1]
expect(tokens[0].scopes).toEqual ["source.java", "comment.line.double-slash.java", "punctuation.definition.comment.java"]
expect(tokens[0].value).toEqual '//'
expect(tokens[1].scopes).toEqual ["source.java", "comment.line.double-slash.java"]
expect(tokens[1].value).toEqual 'comment'
it "correctly parses nested method calls", ->
# Deeply nested call expression; the trailing ';' must come out as the
# statement terminator, not swallowed by the call rules.
tokens = grammar.tokenizeLines('a(b(new Object[0]));')[0]
lastToken = _.last(tokens)
expect(lastToken.scopes).toEqual ['source.java', 'punctuation.terminator.java']
expect(lastToken.value).toEqual ';'
describe "HTML (Ruby - ERB)", ->
it "correctly parses strings inside tags", ->
grammar = atom.syntax.selectGrammar('page.erb')
lines = grammar.tokenizeLines '<% page_title "My Page" %>'
tokens = lines[0]
# Ruby string punctuation scopes must survive inside the embedded-ERB scope.
expect(tokens[2].value).toEqual '"'
expect(tokens[2].scopes).toEqual ["text.html.erb", "meta.embedded.line.erb", "string.quoted.double.ruby", "punctuation.definition.string.begin.ruby"]
expect(tokens[3].value).toEqual 'My Page'
expect(tokens[3].scopes).toEqual ["text.html.erb", "meta.embedded.line.erb", "string.quoted.double.ruby"]
expect(tokens[4].value).toEqual '"'
expect(tokens[4].scopes).toEqual ["text.html.erb", "meta.embedded.line.erb", "string.quoted.double.ruby", "punctuation.definition.string.end.ruby"]
it "does not loop infinitely on <%>", ->
# Regression: a degenerate ERB tag with no body used to hang the tokenizer.
atom.packages.activatePackage('language-html', sync: true)
atom.packages.activatePackage('language-ruby-on-rails', sync: true)
grammar = atom.syntax.selectGrammar('foo.html.erb')
[tokens] = grammar.tokenizeLines '<%>'
expect(tokens.length).toBe 1
expect(tokens[0].value).toEqual '<%>'
expect(tokens[0].scopes).toEqual ["text.html.erb"]
describe "Unicode support", ->
describe "Surrogate pair characters", ->
beforeEach ->
grammar = atom.syntax.selectGrammar('main.js')
# U+1D797 encoded as a UTF-16 surrogate pair inside a JS string literal.
lines = grammar.tokenizeLines "'\uD835\uDF97'"
it "correctly parses JavaScript strings containing surrogate pair characters", ->
tokens = lines[0]
# Quote, pair-as-one-value, quote — the pair must not be split between tokens.
expect(tokens.length).toBe 3
expect(tokens[0].value).toBe "'"
expect(tokens[1].value).toBe "\uD835\uDF97"
expect(tokens[2].value).toBe "'"
describe "when the line contains unicode characters", ->
it "correctly parses tokens starting after them", ->
# U+2026 (ellipsis) before the value: token offsets after a non-ASCII
# character must still line up with the source text.
atom.packages.activatePackage('language-json', sync: true)
grammar = atom.syntax.selectGrammar('package.json')
{tokens} = grammar.tokenizeLine '{"\u2026": 1}'
expect(tokens.length).toBe 8
expect(tokens[6].value).toBe '1'
expect(tokens[6].scopes).toEqual ["source.json", "meta.structure.dictionary.json", "meta.structure.dictionary.value.json", "constant.numeric.json"]
describe "python", ->
it "parses import blocks correctly", ->
grammar = atom.syntax.selectGrammar("file.py")
lines = grammar.tokenizeLines "import a\nimport b"
# Each line tokenizes as keyword / whitespace / module name.
line1 = lines[0]
expect(line1.length).toBe 3
expect(line1[0].value).toEqual "import"
expect(line1[0].scopes).toEqual ["source.python", "keyword.control.import.python"]
expect(line1[1].value).toEqual " "
expect(line1[1].scopes).toEqual ["source.python"]
expect(line1[2].value).toEqual "a"
expect(line1[2].scopes).toEqual ["source.python"]
line2 = lines[1]
expect(line2.length).toBe 3
expect(line2[0].value).toEqual "import"
expect(line2[0].scopes).toEqual ["source.python", "keyword.control.import.python"]
expect(line2[1].value).toEqual " "
expect(line2[1].scopes).toEqual ["source.python"]
expect(line2[2].value).toEqual "b"
expect(line2[2].scopes).toEqual ["source.python"]

View File

@@ -113,7 +113,7 @@ describe "WorkspaceView", ->
it "passes focus to the first focusable element", ->
focusable1 = $$ -> @div "One", id: 'one', tabindex: -1
focusable2 = $$ -> @div "Two", id: 'two', tabindex: -1
atom.workspaceView.horizontal.append(focusable1, focusable2)
atom.workspaceView.appendToLeft(focusable1, focusable2)
expect(document.activeElement).toBe document.body
atom.workspaceView.focus()
@@ -122,7 +122,7 @@ describe "WorkspaceView", ->
describe "when there are no visible focusable elements", ->
it "surrenders focus to the body", ->
focusable = $$ -> @div "One", id: 'one', tabindex: -1
atom.workspaceView.horizontal.append(focusable)
atom.workspaceView.appendToLeft(focusable)
focusable.hide()
expect(document.activeElement).toBe document.body

View File

@@ -1,4 +1,3 @@
TextMateGrammar = require './text-mate-grammar'
Package = require './package'
fs = require 'fs-plus'
path = require 'path'
@@ -105,7 +104,7 @@ class AtomPackage extends Package
atom.keymap.add(keymapPath, map) for [keymapPath, map] in @keymaps
atom.contextMenu.add(menuPath, map['context-menu']) for [menuPath, map] in @menus
atom.menu.add(map.menu) for [menuPath, map] in @menus when map.menu
atom.syntax.addGrammar(grammar) for grammar in @grammars
grammar.activate() for grammar in @grammars
for [scopedPropertiesPath, selector, properties] in @scopedProperties
atom.syntax.addProperties(scopedPropertiesPath, selector, properties)
@@ -152,7 +151,7 @@ class AtomPackage extends Package
@grammars = []
grammarsDirPath = path.join(@path, 'grammars')
for grammarPath in fs.listSync(grammarsDirPath, ['.json', '.cson'])
@grammars.push(TextMateGrammar.loadSync(grammarPath))
@grammars.push(atom.syntax.readGrammarSync(grammarPath))
loadScopedProperties: ->
@scopedProperties = []
@@ -180,7 +179,7 @@ class AtomPackage extends Package
@configActivated = false
deactivateResources: ->
atom.syntax.removeGrammar(grammar) for grammar in @grammars
grammar.deactivate() for grammar in @grammars
atom.syntax.removeProperties(scopedPropertiesPath) for [scopedPropertiesPath] in @scopedProperties
atom.keymap.remove(keymapPath) for [keymapPath] in @keymaps
atom.themes.removeStylesheet(stylesheetPath) for [stylesheetPath] in @stylesheets

View File

@@ -281,6 +281,7 @@ class Atom extends Model
@workspaceView = null
@project.destroy()
@windowEventHandler?.unsubscribe()
@keymap.destroy()
@windowState = null
# Private:

View File

@@ -250,10 +250,14 @@ class Cursor
moveToBottom: ->
@setBufferPosition(@editor.getEofBufferPosition())
# Public: Moves the cursor to the beginning of the screen line.
moveToBeginningOfLine: ->
# Public: Moves the cursor to the beginning of the line.
moveToBeginningOfScreenLine: ->
@setScreenPosition([@getScreenRow(), 0])
# Public: Moves the cursor to the beginning of the buffer line.
moveToBeginningOfLine: ->
@setBufferPosition([@getBufferRow(), 0])
# Public: Moves the cursor to the beginning of the first character in the
# line.
moveToFirstCharacterOfLine: ->
@@ -261,9 +265,7 @@ class Cursor
screenline = @editor.lineForScreenRow(row)
goalColumn = screenline.text.search(/\S/)
return if goalColumn == -1
goalColumn = 0 if goalColumn == column
goalColumn = 0 if goalColumn == column or goalColumn == -1
@setScreenPosition([row, goalColumn])
# Public: Moves the cursor to the beginning of the buffer line, skipping all
@@ -277,9 +279,13 @@ class Cursor
@setBufferPosition(endOfLeadingWhitespace) if endOfLeadingWhitespace.isGreaterThan(position)
# Public: Moves the cursor to the end of the line.
moveToEndOfScreenLine: ->
@setScreenPosition([@getScreenRow(), Infinity])
# Public: Moves the cursor to the end of the buffer line.
moveToEndOfLine: ->
@setScreenPosition([@getScreenRow(), Infinity])
@setBufferPosition([@getBufferRow(), Infinity])
# Public: Moves the cursor to the beginning of the word.
moveToBeginningOfWord: ->

View File

@@ -359,7 +359,7 @@ class DisplayBuffer extends Model
# Get the grammar for this buffer.
#
# Returns the current {TextMateGrammar} or the {NullGrammar}.
# Returns the current {Grammar} or the {NullGrammar}.
getGrammar: ->
@tokenizedBuffer.grammar

View File

@@ -139,7 +139,9 @@ class EditorView extends View
'editor:delete-to-end-of-word': @deleteToEndOfWord
'editor:delete-line': @deleteLine
'editor:cut-to-end-of-line': @cutToEndOfLine
'editor:move-to-beginning-of-screen-line': => @editor.moveCursorToBeginningOfScreenLine()
'editor:move-to-beginning-of-line': @moveCursorToBeginningOfLine
'editor:move-to-end-of-screen-line': => @editor.moveCursorToEndOfScreenLine()
'editor:move-to-end-of-line': @moveCursorToEndOfLine
'editor:move-to-first-character-of-line': @moveCursorToFirstCharacterOfLine
'editor:move-to-beginning-of-word': @moveCursorToBeginningOfWord
@@ -1229,7 +1231,7 @@ class EditorView extends View
updateDisplay: (options={}) ->
return unless @attached and @editor
return if @editor.destroyed
return if @editor.isDestroyed()
unless @isOnDom() and @isVisible()
@redrawOnReattach = true
return

View File

@@ -117,9 +117,7 @@ class Editor extends Model
require './editor-view'
# Private:
destroy: ->
return if @destroyed
@destroyed = true
destroyed: ->
@unsubscribe()
selection.destroy() for selection in @getSelections()
@buffer.release()
@@ -1073,6 +1071,10 @@ class Editor extends Model
@moveCursors (cursor) -> cursor.moveToBottom()
# Public: Moves every local cursor to the beginning of the line.
moveCursorToBeginningOfScreenLine: ->
@moveCursors (cursor) -> cursor.moveToBeginningOfScreenLine()
# Public: Moves every local cursor to the beginning of the buffer line.
moveCursorToBeginningOfLine: ->
@moveCursors (cursor) -> cursor.moveToBeginningOfLine()
@@ -1081,6 +1083,10 @@ class Editor extends Model
@moveCursors (cursor) -> cursor.moveToFirstCharacterOfLine()
# Public: Moves every local cursor to the end of the line.
moveCursorToEndOfScreenLine: ->
@moveCursors (cursor) -> cursor.moveToEndOfScreenLine()
# Public: Moves every local cursor to the end of the buffer line.
moveCursorToEndOfLine: ->
@moveCursors (cursor) -> cursor.moveToEndOfLine()

View File

@@ -4,6 +4,7 @@ fs = require 'fs-plus'
path = require 'path'
CSON = require 'season'
KeyBinding = require './key-binding'
File = require './file'
{Emitter} = require 'emissary'
Modifiers = ['alt', 'control', 'ctrl', 'shift', 'cmd']
@@ -28,6 +29,9 @@ class Keymap
constructor: ({@resourcePath, @configDirPath})->
@keyBindings = []
destroy: ->
@unwatchUserKeymap()
# Public: Returns an array of all {KeyBinding}s.
getKeyBindings: ->
_.clone(@keyBindings)
@@ -118,9 +122,21 @@ class Keymap
@loadDirectory(path.join(@resourcePath, 'keymaps'))
@emit('bundled-keymaps-loaded')
userKeymapPath: ->
CSON.resolve(path.join(@configDirPath, 'keymap'))
unwatchUserKeymap: ->
keymapPath = @userKeymapPath()
@userKeymapFile?.off()
@remove(keymapPath) if keymapPath
loadUserKeymap: ->
userKeymapPath = CSON.resolve(path.join(@configDirPath, 'keymap'))
@load(userKeymapPath) if userKeymapPath
keymapPath = @userKeymapPath()
@unwatchUserKeymap()
if keymapPath
@load(keymapPath)
@userKeymapFile = new File(keymapPath)
@userKeymapFile.on 'contents-changed', => @loadUserKeymap()
loadDirectory: (directoryPath) ->
@load(filePath) for filePath in fs.listSync(directoryPath, ['.cson', '.json'])

View File

@@ -289,9 +289,6 @@ class LanguageMode
if desiredIndentLevel >= 0 and desiredIndentLevel < currentIndentLevel
@editor.setIndentationForBufferRow(bufferRow, desiredIndentLevel)
tokenizeLine: (line, stack, firstLine) ->
{tokens, stack} = @grammar.tokenizeLine(line, stack, firstLine)
getRegexForProperty: (scopes, property) ->
if pattern = atom.syntax.getProperty(scopes, property)
new OnigRegExp(pattern)

View File

@@ -1,23 +0,0 @@
Token = require './token'
{Emitter} = require 'emissary'
### Internal ###
# Fallback grammar used when no real grammar matches a buffer: every line is a
# single plain-text token. getScore returns 0 so any real grammar with a
# positive score wins grammar selection.
# NOTE(review): this is the pre-first-mate null grammar (deleted in this
# commit); indentation/blank lines were lost in extraction.
module.exports =
class NullGrammar
Emitter.includeInto(this)
name: 'Null Grammar'
scopeName: 'text.plain.null-grammar'
# Always lowest priority in selectGrammar's max-by-score.
getScore: -> 0
# One token spanning the whole line, with the plain-text scope.
tokenizeLine: (line) ->
{ tokens: [new Token(value: line, scopes: ['null-grammar.text.plain'])] }
# Split on '\n' and tokenize each line independently (no rule-stack state).
tokenizeLines: (text) ->
lines = text.split('\n')
for line, i in lines
{tokens} = @tokenizeLine(line)
tokens
# No includable rules, so nothing to refresh when other grammars change.
grammarUpdated: -> # noop

View File

@@ -226,10 +226,6 @@ class PackageManager
{@packageDependencies} = JSON.parse(fs.readFileSync(metadataPath)) ? {}
@packageDependencies ?= {}
# Temporarily ignore 'grunt-download-atom-shell' here, should remove this
# when it became a public npm module.
delete @packageDependencies['grunt-download-atom-shell']
@packageDependencies
# Public: Get an array of all the available package paths.

View File

@@ -186,13 +186,13 @@ class Pane extends View
false
# Public: Remove the specified item.
destroyItem: (item) ->
destroyItem: (item, options) ->
@unsubscribe(item) if _.isFunction(item.off)
@trigger 'pane:before-item-destroyed', [item]
if @promptToSaveItem(item)
@getContainer()?.itemDestroyed(item)
@removeItem(item)
@removeItem(item, options)
item.destroy?()
true
else

View File

@@ -21,7 +21,7 @@ class Selection
@marker.on 'destroyed', =>
@destroyed = true
@editor.removeSelection(this)
@emit 'destroyed' unless @editor.destroyed
@emit 'destroyed' unless @editor.isDestroyed()
# Private:
destroy: ->
@@ -216,7 +216,7 @@ class Selection
# Public: Selects all the text from the current cursor position to the end of
# the line.
selectToEndOfLine: ->
@modifySelection => @cursor.moveToEndOfLine()
@modifySelection => @cursor.moveToEndOfScreenLine()
# Public: Selects all the text from the current cursor position to the
# beginning of the word.

View File

@@ -1,15 +1,15 @@
_ = require 'underscore-plus'
{specificity} = require 'clear-cut'
{Subscriber} = require 'emissary'
{GrammarRegistry, ScopeSelector} = require 'first-mate'
{$, $$} = require './space-pen-extensions'
{Emitter} = require 'emissary'
NullGrammar = require './null-grammar'
TextMateScopeSelector = require('first-mate').ScopeSelector
### Internal ###
Token = require './token'
### Public ###
module.exports =
class Syntax
Emitter.includeInto(this)
class Syntax extends GrammarRegistry
Subscriber.includeInto(this)
atom.deserializers.add(this)
@@ -19,53 +19,15 @@ class Syntax
syntax
constructor: ->
@nullGrammar = new NullGrammar
@grammars = [@nullGrammar]
@grammarsByScopeName = {}
@injectionGrammars = []
@grammarOverridesByPath = {}
super
@scopedPropertiesIndex = 0
@scopedProperties = []
serialize: ->
{ deserializer: @constructor.name, @grammarOverridesByPath }
{deserializer: @constructor.name, @grammarOverridesByPath}
addGrammar: (grammar) ->
previousGrammars = new Array(@grammars...)
@grammars.push(grammar)
@grammarsByScopeName[grammar.scopeName] = grammar
@injectionGrammars.push(grammar) if grammar.injectionSelector?
@grammarUpdated(grammar.scopeName)
@emit 'grammar-added', grammar
removeGrammar: (grammar) ->
_.remove(@grammars, grammar)
delete @grammarsByScopeName[grammar.scopeName]
_.remove(@injectionGrammars, grammar)
@grammarUpdated(grammar.scopeName)
grammarUpdated: (scopeName) ->
for grammar in @grammars when grammar.scopeName isnt scopeName
@emit 'grammar-updated', grammar if grammar.grammarUpdated(scopeName)
setGrammarOverrideForPath: (path, scopeName) ->
@grammarOverridesByPath[path] = scopeName
clearGrammarOverrideForPath: (path) ->
delete @grammarOverridesByPath[path]
clearGrammarOverrides: ->
@grammarOverridesByPath = {}
selectGrammar: (filePath, fileContents) ->
grammar = _.max @grammars, (grammar) -> grammar.getScore(filePath, fileContents)
grammar
grammarOverrideForPath: (path) ->
@grammarOverridesByPath[path]
grammarForScopeName: (scopeName) ->
@grammarsByScopeName[scopeName]
createToken: (value, scopes) -> new Token({value, scopes})
addProperties: (args...) ->
name = args.shift() if args.length > 2
@@ -132,4 +94,4 @@ class Syntax
element[0]
cssSelectorFromScopeSelector: (scopeSelector) ->
new TextMateScopeSelector(scopeSelector).toCssSelector()
new ScopeSelector(scopeSelector).toCssSelector()

View File

@@ -1,522 +0,0 @@
_ = require 'underscore-plus'
fs = require 'fs-plus'
Token = require './token'
{OnigRegExp, OnigScanner} = require 'oniguruma'
path = require 'path'
{Emitter} = require 'emissary'
{ScopeSelector} = require 'first-mate'
pathSplitRegex = new RegExp("[#{_.escapeRegExp(path.sep)}.]")
### Internal ###
module.exports =
class TextMateGrammar
Emitter.includeInto(this)
@load: (grammarPath, done) ->
fs.readObject grammarPath, (error, object) ->
if error?
done(error)
else
done(null, new TextMateGrammar(object))
@loadSync: (grammarPath) ->
new TextMateGrammar(fs.readObjectSync(grammarPath))
name: null
rawPatterns: null
rawRepository: null
fileTypes: null
scopeName: null
repository: null
initialRule: null
firstLineRegex: null
includedGrammarScopes: null
maxTokensPerLine: 100
constructor: ({ @name, @fileTypes, @scopeName, injections, injectionSelector, patterns, repository, @foldingStopMarker, firstLineMatch}) ->
@rawPatterns = patterns
@rawRepository = repository
@injections = new Injections(this, injections)
if injectionSelector?
@injectionSelector = new ScopeSelector(injectionSelector)
@firstLineRegex = new OnigRegExp(firstLineMatch) if firstLineMatch
@fileTypes ?= []
@includedGrammarScopes = []
clearRules: ->
@initialRule = null
@repository = null
getInitialRule: ->
@initialRule ?= new Rule(this, {@scopeName, patterns: @rawPatterns})
getRepository: ->
@repository ?= do =>
repository = {}
for name, data of @rawRepository
data = {patterns: [data], tempName: name} if data.begin? or data.match?
repository[name] = new Rule(this, data)
repository
addIncludedGrammarScope: (scope) ->
@includedGrammarScopes.push(scope) unless _.include(@includedGrammarScopes, scope)
grammarUpdated: (scopeName) ->
return false unless _.include(@includedGrammarScopes, scopeName)
@clearRules()
atom.syntax.grammarUpdated(@scopeName)
@emit 'grammar-updated'
true
getScore: (filePath, contents) ->
contents = fs.readFileSync(filePath, 'utf8') if not contents? and fs.isFileSync(filePath)
if atom.syntax.grammarOverrideForPath(filePath) is @scopeName
2 + (filePath?.length ? 0)
else if @matchesContents(contents)
1 + (filePath?.length ? 0)
else
@getPathScore(filePath)
matchesContents: (contents) ->
return false unless contents? and @firstLineRegex?
escaped = false
numberOfNewlinesInRegex = 0
for character in @firstLineRegex.source
switch character
when '\\'
escaped = !escaped
when 'n'
numberOfNewlinesInRegex++ if escaped
escaped = false
else
escaped = false
lines = contents.split('\n')
@firstLineRegex.test(lines[0..numberOfNewlinesInRegex].join('\n'))
getPathScore: (filePath) ->
return -1 unless filePath?
pathComponents = filePath.split(pathSplitRegex)
pathScore = -1
@fileTypes.forEach (fileType) ->
fileTypeComponents = fileType.split(pathSplitRegex)
pathSuffix = pathComponents[-fileTypeComponents.length..-1]
if _.isEqual(pathSuffix, fileTypeComponents)
pathScore = Math.max(pathScore, fileType.length)
pathScore
tokenizeLine: (line, ruleStack=[@getInitialRule()], firstLine=false) ->
originalRuleStack = ruleStack
ruleStack = new Array(ruleStack...) # clone ruleStack
tokens = []
position = 0
loop
scopes = scopesFromStack(ruleStack)
previousRuleStackLength = ruleStack.length
previousPosition = position
if tokens.length >= (@getMaxTokensPerLine() - 1)
token = new Token(value: line[position..], scopes: scopes)
tokens.push token
ruleStack = originalRuleStack
break
break if position == line.length + 1 # include trailing newline position
if match = _.last(ruleStack).getNextTokens(ruleStack, line, position, firstLine)
{ nextTokens, tokensStartPosition, tokensEndPosition } = match
if position < tokensStartPosition # unmatched text before next tokens
tokens.push(new Token(
value: line[position...tokensStartPosition]
scopes: scopes
))
tokens.push(nextTokens...)
position = tokensEndPosition
break if position is line.length and nextTokens.length is 0 and ruleStack.length is previousRuleStackLength
else # push filler token for unmatched text at end of line
if position < line.length or line.length == 0
tokens.push(new Token(
value: line[position...line.length]
scopes: scopes
))
break
if position == previousPosition
if ruleStack.length == previousRuleStackLength
console.error("Popping rule because it loops at column #{position} of line '#{line}'", _.clone(ruleStack))
ruleStack.pop()
else if ruleStack.length > previousRuleStackLength # Stack size increased with zero length match
[penultimateRule, lastRule] = ruleStack[-2..]
# Same exact rule was pushed but position wasn't advanced
if lastRule? and lastRule == penultimateRule
popStack = true
# Rule with same scope name as previous rule was pushed but position wasn't advanced
if lastRule?.scopeName? and penultimateRule.scopeName == lastRule.scopeName
popStack = true
if popStack
ruleStack.pop()
tokens.push(new Token(
value: line[position...line.length]
scopes: scopes
))
break
ruleStack.forEach (rule) -> rule.clearAnchorPosition()
{ tokens, ruleStack }
tokenizeLines: (text) ->
lines = text.split('\n')
ruleStack = null
for line, i in lines
{ tokens, ruleStack } = @tokenizeLine(line, ruleStack, i is 0)
tokens
getMaxTokensPerLine: ->
@maxTokensPerLine
class Injections
@injections: null
constructor: (grammar, injections={}) ->
@injections = []
@scanners = {}
for selector, values of injections
continue unless values?.patterns?.length > 0
patterns = []
anchored = false
for regex in values.patterns
pattern = new Pattern(grammar, regex)
anchored = true if pattern.anchored
patterns.push(pattern.getIncludedPatterns(grammar, patterns)...)
@injections.push
anchored: anchored
selector: new ScopeSelector(selector)
patterns: patterns
getScanner: (injection, firstLine, position, anchorPosition) ->
return injection.scanner if injection.scanner?
regexes = _.map injection.patterns, (pattern) ->
pattern.getRegex(firstLine, position, anchorPosition)
scanner = new OnigScanner(regexes)
scanner.patterns = injection.patterns
scanner.anchored = injection.anchored
injection.scanner = scanner unless scanner.anchored
scanner
getScanners: (ruleStack, firstLine, position, anchorPosition) ->
scanners = []
scopes = scopesFromStack(ruleStack)
for injection in @injections
if injection.selector.matches(scopes)
scanner = @getScanner(injection, firstLine, position, anchorPosition)
scanners.push(scanner)
scanners
class Rule
grammar: null
scopeName: null
patterns: null
scannersByBaseGrammarName: null
createEndPattern: null
anchorPosition: -1
constructor: (@grammar, {@scopeName, patterns, @endPattern}) ->
patterns ?= []
@patterns = patterns.map (pattern) => new Pattern(grammar, pattern)
@patterns.unshift(@endPattern) if @endPattern and !@endPattern.hasBackReferences
@scannersByBaseGrammarName = {}
getIncludedPatterns: (baseGrammar, included=[]) ->
return [] if _.include(included, this)
included = included.concat([this])
allPatterns = []
for pattern in @patterns
allPatterns.push(pattern.getIncludedPatterns(baseGrammar, included)...)
allPatterns
clearAnchorPosition: -> @anchorPosition = -1
createScanner: (patterns, firstLine, position) ->
anchored = false
regexes = _.map patterns, (pattern) =>
anchored = true if pattern.anchored
pattern.getRegex(firstLine, position, @anchorPosition)
scanner = new OnigScanner(regexes)
scanner.patterns = patterns
scanner.anchored = anchored
scanner
getScanner: (baseGrammar, position, firstLine) ->
return scanner if scanner = @scannersByBaseGrammarName[baseGrammar.name]
patterns = @getIncludedPatterns(baseGrammar)
scanner = @createScanner(patterns, firstLine, position)
@scannersByBaseGrammarName[baseGrammar.name] = scanner unless scanner.anchored
scanner
scanInjections: (ruleStack, line, position, firstLine) ->
baseGrammar = ruleStack[0].grammar
if injections = baseGrammar.injections
scanners = injections.getScanners(ruleStack, position, firstLine, @anchorPosition)
for scanner in scanners
result = scanner.findNextMatch(line, position)
return result if result?
normalizeCaptureIndices: (line, captureIndices) ->
lineLength = line.length
captureIndices.forEach (capture) ->
capture.end = Math.min(capture.end, lineLength)
capture.start = Math.min(capture.start, lineLength)
findNextMatch: (ruleStack, line, position, firstLine) ->
lineWithNewline = "#{line}\n"
baseGrammar = ruleStack[0].grammar
results = []
scanner = @getScanner(baseGrammar, position, firstLine)
if result = scanner.findNextMatch(lineWithNewline, position)
results.push(result)
if result = @scanInjections(ruleStack, lineWithNewline, position, firstLine)
results.push(result)
scopes = scopesFromStack(ruleStack)
for injectionGrammar in _.without(atom.syntax.injectionGrammars, @grammar, baseGrammar)
if injectionGrammar.injectionSelector.matches(scopes)
scanner = injectionGrammar.getInitialRule().getScanner(injectionGrammar, position, firstLine)
if result = scanner.findNextMatch(lineWithNewline, position)
results.push(result)
if results.length > 0
_.min results, (result) =>
@normalizeCaptureIndices(line, result.captureIndices)
result.captureIndices[0].start
getNextTokens: (ruleStack, line, position, firstLine) ->
result = @findNextMatch(ruleStack, line, position, firstLine)
return null unless result?
{ index, captureIndices, scanner } = result
firstCapture = captureIndices[0]
nextTokens = scanner.patterns[index].handleMatch(ruleStack, line, captureIndices)
{ nextTokens, tokensStartPosition: firstCapture.start, tokensEndPosition: firstCapture.end }
getRuleToPush: (line, beginPatternCaptureIndices) ->
if @endPattern.hasBackReferences
rule = new Rule(@grammar, {@scopeName})
rule.endPattern = @endPattern.resolveBackReferences(line, beginPatternCaptureIndices)
rule.patterns = [rule.endPattern, @patterns...]
rule
else
this
class Pattern
grammar: null
pushRule: null
popRule: false
scopeName: null
captures: null
backReferences: null
anchored: false
constructor: (@grammar, { name, contentName, @include, match, begin, end, captures, beginCaptures, endCaptures, patterns, @popRule, @hasBackReferences}) ->
@scopeName = name ? contentName # TODO: We need special treatment of contentName
if match
if (end or @popRule) and @hasBackReferences ?= /\\\d+/.test(match)
@match = match
else
@regexSource = match
@captures = captures
else if begin
@regexSource = begin
@captures = beginCaptures ? captures
endPattern = new Pattern(@grammar, { match: end, captures: endCaptures ? captures, popRule: true})
@pushRule = new Rule(@grammar, { @scopeName, patterns, endPattern })
if @captures?
for group, capture of @captures
if capture.patterns?.length > 0 and not capture.rule
capture.scopeName = @scopeName
capture.rule = new Rule(@grammar, capture)
@anchored = @hasAnchor()
getRegex: (firstLine, position, anchorPosition) ->
if @anchored
@replaceAnchor(firstLine, position, anchorPosition)
else
@regexSource
hasAnchor: ->
return false unless @regexSource
escape = false
for character in @regexSource.split('')
return true if escape and 'AGz'.indexOf(character) isnt -1
escape = not escape and character is '\\'
false
replaceAnchor: (firstLine, offset, anchor) ->
escaped = []
placeholder = '\uFFFF'
escape = false
for character in @regexSource.split('')
if escape
switch character
when 'A'
if firstLine
escaped.push("\\#{character}")
else
escaped.push(placeholder)
when 'G'
if offset is anchor
escaped.push("\\#{character}")
else
escaped.push(placeholder)
when 'z' then escaped.push('$(?!\n)(?<!\n)')
else escaped.push("\\#{character}")
escape = false
else if character is '\\'
escape = true
else
escaped.push(character)
escaped.join('')
resolveBackReferences: (line, beginCaptureIndices) ->
beginCaptures = []
for {start, end} in beginCaptureIndices
beginCaptures.push line[start...end]
resolvedMatch = @match.replace /\\\d+/g, (match) ->
index = parseInt(match[1..])
_.escapeRegExp(beginCaptures[index] ? "\\#{index}")
new Pattern(@grammar, { hasBackReferences: false, match: resolvedMatch, @captures, @popRule })
ruleForInclude: (baseGrammar, name) ->
if name[0] == "#"
@grammar.getRepository()[name[1..]]
else if name == "$self"
@grammar.getInitialRule()
else if name == "$base"
baseGrammar.getInitialRule()
else
@grammar.addIncludedGrammarScope(name)
atom.syntax.grammarForScopeName(name)?.getInitialRule()
getIncludedPatterns: (baseGrammar, included) ->
if @include
rule = @ruleForInclude(baseGrammar, @include)
rule?.getIncludedPatterns(baseGrammar, included) ? []
else
[this]
resolveScopeName: (line, captureIndices) ->
resolvedScopeName = @scopeName.replace /\${(\d+):\/(downcase|upcase)}/, (match, index, command) ->
capture = captureIndices[parseInt(index)]
if capture?
replacement = line.substring(capture.start, capture.end)
switch command
when 'downcase' then replacement.toLowerCase()
when 'upcase' then replacement.toUpperCase()
else replacement
else
match
resolvedScopeName.replace /\$(\d+)/, (match, index) ->
capture = captureIndices[parseInt(index)]
if capture?
line.substring(capture.start, capture.end)
else
match
handleMatch: (stack, line, captureIndices) ->
scopes = scopesFromStack(stack)
if @scopeName and not @popRule
scopes.push(@resolveScopeName(line, captureIndices))
if @captures
tokens = @getTokensForCaptureIndices(line, _.clone(captureIndices), scopes, stack)
else
{start, end} = captureIndices[0]
zeroLengthMatch = end == start
if zeroLengthMatch
tokens = []
else
tokens = [new Token(value: line[start...end], scopes: scopes)]
if @pushRule
ruleToPush = @pushRule.getRuleToPush(line, captureIndices)
ruleToPush.anchorPosition = captureIndices[0].end
stack.push(ruleToPush)
else if @popRule
stack.pop()
tokens
getTokensForCaptureRule: (rule, line, captureStart, captureEnd, scopes, stack) ->
captureText = line.substring(captureStart, captureEnd)
{tokens} = rule.grammar.tokenizeLine(captureText, [stack..., rule])
tokens
getTokensForCaptureIndices: (line, captureIndices, scopes, stack) ->
parentCapture = captureIndices.shift()
tokens = []
if scope = @captures[parentCapture.index]?.name
scopes = scopes.concat(scope)
if captureRule = @captures[parentCapture.index]?.rule
captureTokens = @getTokensForCaptureRule(captureRule, line, parentCapture.start, parentCapture.end, scopes, stack)
tokens.push(captureTokens...)
# Consume child captures
while captureIndices.length and captureIndices[0].start < parentCapture.end
captureIndices.shift()
else
previousChildCaptureEnd = parentCapture.start
while captureIndices.length and captureIndices[0].start < parentCapture.end
childCapture = captureIndices[0]
emptyCapture = childCapture.end - childCapture.start == 0
captureHasNoScope = not @captures[childCapture.index]
if emptyCapture or captureHasNoScope
captureIndices.shift()
continue
if childCapture.start > previousChildCaptureEnd
tokens.push(new Token(
value: line[previousChildCaptureEnd...childCapture.start]
scopes: scopes
))
captureTokens = @getTokensForCaptureIndices(line, captureIndices, scopes, stack)
tokens.push(captureTokens...)
previousChildCaptureEnd = childCapture.end
if parentCapture.end > previousChildCaptureEnd
tokens.push(new Token(
value: line[previousChildCaptureEnd...parentCapture.end]
scopes: scopes
))
tokens
### Internal ###
scopesFromStack = (stack) ->
_.compact(_.pluck(stack, "scopeName"))

View File

@@ -2,7 +2,6 @@ Package = require './package'
path = require 'path'
_ = require 'underscore-plus'
fs = require 'fs-plus'
TextMateGrammar = require './text-mate-grammar'
async = require 'async'
### Internal ###
@@ -41,14 +40,14 @@ class TextMatePackage extends Package
activate: ->
@measure 'activateTime', =>
atom.syntax.addGrammar(grammar) for grammar in @grammars
grammar.activate() for grammar in @grammars
for { selector, properties } in @scopedProperties
atom.syntax.addProperties(@path, selector, properties)
activateConfig: -> # noop
deactivate: ->
atom.syntax.removeGrammar(grammar) for grammar in @grammars
grammar.deactivate() for grammar in @grammars
atom.syntax.removeProperties(@path)
legalGrammarExtensions: ['plist', 'tmLanguage', 'tmlanguage', 'json', 'cson']
@@ -66,18 +65,20 @@ class TextMatePackage extends Package
done()
loadGrammarAtPath: (grammarPath, done) =>
TextMateGrammar.load grammarPath, (err, grammar) =>
return console.log("Error loading grammar at path '#{grammarPath}':", err.stack ? err) if err
@addGrammar(grammar)
done()
atom.syntax.readGrammar grammarPath, (error, grammar) =>
if error?
console.log("Error loading grammar at path '#{grammarPath}':", error.stack ? error)
else
@addGrammar(grammar)
done?()
loadGrammarsSync: ->
for grammarPath in fs.listSync(@getSyntaxesPath(), @legalGrammarExtensions)
@addGrammar(TextMateGrammar.loadSync(grammarPath))
@addGrammar(atom.syntax.readGrammarSync(grammarPath))
addGrammar: (grammar) ->
@grammars.push(grammar)
atom.syntax.addGrammar(grammar) if @isActive()
grammar.activate() if @isActive()
getGrammars: -> @grammars

View File

@@ -258,6 +258,38 @@ class WorkspaceView extends View
getOpenBufferPaths: ->
_.uniq(_.flatten(@getEditorViews().map (editorView) -> editorView.getOpenBufferPaths()))
# Public: Prepends the element to the top of the window.
prependToTop: (element) ->
@vertical.prepend(element)
# Public: Appends the element to the top of the window.
appendToTop: (element) ->
@panes.before(element)
# Public: Prepends the element to the bottom of the window.
prependToBottom: (element) ->
@panes.after(element)
# Public: Appends the element to bottom of the window.
appendToBottom: (element) ->
@vertical.append(element)
# Public: Prepends the element to the left side of the window.
prependToLeft: (element) ->
@horizontal.prepend(element)
# Public: Appends the element to the left side of the window.
appendToLeft: (element) ->
@vertical.before(element)
# Public: Prepends the element to the right side of the window.
prependToRight: (element) ->
@vertical.after(element)
# Public: Appends the element to the right side of the window.
appendToRight: (element) ->
@horizontal.append(element)
# Public: Returns the currently focused {Pane}.
getActivePane: ->
@panes.getActivePane()