;(function() {
'use strict'
/* global define */

var esprima
var exportFn
var toString = Object.prototype.toString

if (typeof module === 'object' && typeof module.exports === 'object' && typeof require === 'function') {
  // server side
  esprima = require('esprima')
  exportFn = function(redeyed) { module.exports = redeyed }
  bootstrap(esprima, exportFn)
} else if (typeof define === 'function' && define.amd) {
  // client side
  // amd
  define(['esprima'], function(esprima) {
    return bootstrap(esprima)
  })
} else if (typeof window === 'object') {
  // no amd -> attach to window if it exists
  // Note that this requires 'esprima' to be defined on the window, so that script has to be loaded first
  window.redeyed = bootstrap(window.esprima)
}

function bootstrap(esprima, exportFn) {
  function isFunction(obj) {
    return toString.call(obj) === '[object Function]'
  }

  function isString(obj) {
    return toString.call(obj) === '[object String]'
  }

  function isObject(obj) {
    return toString.call(obj) === '[object Object]'
  }

  function surroundWith(before, after) {
    return function(s) { return before + s + after }
  }

  function isNonCircular(key) {
    return key !== '_parent'
  }

  function objectizeString(value) {
    var vals = value.split(':')

    if (vals.length === 0 || vals.length > 2) {
      throw new Error(
        'illegal string config: ' + value +
        '\nShould be of format "before:after"'
      )
    }

    if (vals.length === 1 || vals[1].length === 0) {
      return vals.indexOf(':') < 0 ? { _before: vals[0] } : { _after: vals[0] }
    } else {
      return { _before: vals[0], _after: vals[1] }
    }
  }

  function objectize(node) {
    // Converts 'bef:aft' to { _before: bef, _after: aft }
    // and resolves undefined before/after from parent or root

    function resolve(value, key) {
      // resolve before/after from root or parent if it isn't present on the current node
      if (!value._parent) return undefined

      // Immediate parent
      if (value._parent._default && value._parent._default[key]) return value._parent._default[key]

      // Root
      var root = value._parent._parent
      if (!root) return undefined

      return root._default ? root._default[key] : undefined
    }

    function process(key) {
      var value = node[key]

      if (!value) return
      if (isFunction(value)) return

      // normalize all strings to objects
      if (isString(value)) {
        node[key] = value = objectizeString(value)
      }

      value._parent = node

      if (isObject(value)) {
        if (!value._before && !value._after) return objectize(value)

        // resolve missing _before or _after from parent(s)
        // in case we only have either one on this node
        value._before = value._before || resolve(value, '_before')
        value._after = value._after || resolve(value, '_after')

        return
      }

      throw new Error('nodes need to be either {String}, {Object} or {Function}. ' + value + ' is neither.')
    }

    // Process _default ones first so children can resolve missing before/after from them
    if (node._default) process('_default')

    Object.keys(node)
      .filter(function(key) {
        return isNonCircular(key)
          && node.hasOwnProperty(key)
          && key !== '_before'
          && key !== '_after'
          && key !== '_default'
      })
      .forEach(process)
  }

  function functionize(node) {
    Object.keys(node)
      .filter(function(key) {
        return isNonCircular(key) && node.hasOwnProperty(key)
      })
      .forEach(function(key) {
        var value = node[key]

        if (isFunction(value)) return

        if (isObject(value)) {
          if (!value._before && !value._after) return functionize(value)

          // at this point before/after were "inherited" from the parent or root
          // (see objectize)
          var before = value._before || ''
          var after = value._after || ''

          node[key] = surroundWith(before, after)
          return node[key]
        }
      })
  }

  function normalize(root) {
    objectize(root)
    functionize(root)
  }
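  // Illustrative sketch (not executed; the 'red'/'reset' values are made up):
  // given a config such as
  //   { Keyword: { 'var': 'red:reset', _default: 'green:reset' } }
  // normalize() first objectizes each string into { _before: .., _after: .. }, e.g.
  //   { Keyword: { 'var': { _before: 'red', _after: 'reset' }, _default: { .. } } }
  // and then functionizes every entry into a surround function, so that
  //   config.Keyword['var']('var')  // => 'red' + 'var' + 'reset'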
  function mergeTokensAndComments(tokens, comments) {
    var all = {}

    function addToAllByRangeStart(t) {
      all[ t.range[0] ] = t
    }

    tokens.forEach(addToAllByRangeStart)
    comments.forEach(addToAllByRangeStart)

    // keys are sorted automatically
    return Object.keys(all)
      .map(function(k) { return all[k] })
  }

  function redeyed(code, config, opts) {
    opts = opts || {}
    var parser = opts.parser || esprima
    var jsx = !!opts.jsx
    // the tokenizer doesn't support JSX at this point (esprima@4.0.0),
    // therefore we need to generate the AST via the parser, not only to
    // keep the tokenizer from erroring but also to get JSXIdentifier tokens
    var buildAst = jsx || !!opts.buildAst

    var hashbang = ''
    var ast
    var tokens
    var comments
    var lastSplitEnd = 0
    var splits = []
    var transformedCode
    var all
    var info

    // Replace the hashbang line with whitespace to preserve token locations
    if (code[0] === '#' && code[1] === '!') {
      hashbang = code.substr(0, code.indexOf('\n') + 1)
      code = Array.apply(0, Array(hashbang.length)).join(' ') + '\n' + code.substr(hashbang.length)
    }

    if (buildAst) {
      ast = parser.parse(code, { tokens: true, comment: true, range: true, loc: true, tolerant: true, jsx: true })
      tokens = ast.tokens
      comments = ast.comments
    } else {
      tokens = []
      comments = []
      parser.tokenize(code, { range: true, loc: true, comment: true }, function(token) {
        if (token.type === 'LineComment') {
          token.type = 'Line'
          comments.push(token)
        } else if (token.type === 'BlockComment') {
          token.type = 'Block'
          comments.push(token)
        } else {
          // Optimistically upgrade 'static' to a keyword
          if (token.type === 'Identifier' && token.value === 'static') token.type = 'Keyword'
          tokens.push(token)
        }
      })
    }

    normalize(config)

    function tokenIndex(tokens, tkn, start) {
      var current
      var rangeStart = tkn.range[0]

      for (current = start; current < tokens.length; current++) {
        if (tokens[current].range[0] === rangeStart) return current
      }

      throw new Error('Token ' + JSON.stringify(tkn) + ' not found at or after index: ' + start)
    }

    // process reads `start`, `end` and `info` set by the token loop below
    // (their var declarations are hoisted to this function's scope)
    function process(surround) {
      var result
      var currentIndex
      var nextIndex
      var skip = 0
      var splitEnd

      result = surround(code.slice(start, end), info)

      if (isObject(result)) {
        splits.push(result.replacement)

        currentIndex = info.tokenIndex
        nextIndex = tokenIndex(info.tokens, result.skipPastToken, currentIndex)
        skip = nextIndex - currentIndex
        splitEnd = skip > 0 ? tokens[nextIndex - 1].range[1] : end
      } else {
        splits.push(result)
        splitEnd = end
      }

      return { skip: skip, splitEnd: splitEnd }
    }

    function addSplit(start, end, surround, info) {
      var result
      var skip = 0

      if (start >= end) return
      if (surround) {
        result = process(surround)
        skip = result.skip
        lastSplitEnd = result.splitEnd
      } else {
        splits.push(code.slice(start, end))
        lastSplitEnd = end
      }

      return skip
    }
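    // Illustrative walkthrough (not executed; code and config are made up):
    // for code 'var a = 1' and a normalized config of { Keyword: { _default: '<:>' } }
    // the loop below surrounds only the Keyword token, collecting
    //   splits = [ '<var>', ' a = 1' ]
    // which join back into '<var> a = 1'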
    all = mergeTokensAndComments(tokens, comments)

    for (var tokenIdx = 0; tokenIdx < all.length; tokenIdx++) {
      var token = all[tokenIdx]
      var surroundForType = config[token.type]
      var surround
      var start
      var end

      // At least the type (e.g., 'Keyword') needs to be specified for the token to be surrounded
      if (surroundForType) {
        // root defaults are only taken into account while resolving before/after;
        // otherwise a root default would apply to everything, even if no type default was specified
        surround = surroundForType
            && surroundForType.hasOwnProperty(token.value)
            && surroundForType[token.value]
            && isFunction(surroundForType[token.value])
          ? surroundForType[token.value]
          : surroundForType._default

        start = token.range[0]
        end = token.range[1]

        addSplit(lastSplitEnd, start)
        info = { tokenIndex: tokenIdx, tokens: all, ast: ast, code: code }
        tokenIdx += addSplit(start, end, surround, info)
      }
    }

    if (lastSplitEnd < code.length) {
      addSplit(lastSplitEnd, code.length)
    }

    if (!opts.nojoin) {
      transformedCode = splits.join('')
      if (hashbang.length > 0) {
        transformedCode = hashbang + transformedCode.substr(hashbang.length)
      }
    }

    return {
        ast      : ast
      , tokens   : tokens
      , comments : comments
      , splits   : splits
      , code     : transformedCode
    }
  }

  return exportFn ? exportFn(redeyed) : redeyed
}
})()
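// Usage sketch (assuming this file is consumed as the published 'redeyed'
// module; the config values are illustrative):
//
//   var redeyed = require('redeyed')
//   var result  = redeyed('var a = 1', { Keyword: { _default: '<:>' } })
//
//   console.log(result.code)  // => '<var> a = 1'
//   // result also exposes tokens, comments, splits and, when opts.buildAst
//   // (or opts.jsx) is set, the ast generated while transforming the code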