"use strict";

Object.defineProperty(exports, "__esModule", { value: true });
exports.stripIgnoredCharacters = stripIgnoredCharacters;

var _inspect = _interopRequireDefault(require("../jsutils/inspect"));

var _source = require("../language/source");

var _tokenKind = require("../language/tokenKind");

var _lexer = require("../language/lexer");

var _blockString = require("../language/blockString");

// Babel interop helper: wraps CommonJS exports so `.default` access works.
function _interopRequireDefault(obj) {
  return obj && obj.__esModule ? obj : { default: obj };
}

/**
 * Strips characters that are not significant to the validity or execution
 * of a GraphQL document:
 *   - UnicodeBOM
 *   - WhiteSpace
 *   - LineTerminator
 *   - Comment
 *   - Comma
 *   - BlockString indentation
 *
 * Note: It is required to have a delimiter character between neighboring
 * non-punctuator tokens and this function always uses single space as delimiter.
 *
 * It is guaranteed that both input and output documents if parsed would result
 * in the exact same AST except for nodes location.
 *
 * Warning: It is guaranteed that this function will always produce stable results.
 * However, it's not guaranteed that it will stay the same between different
 * releases due to bugfixes or changes in the GraphQL specification.
 *
 * Query example:
 *
 *   query SomeQuery($foo: String!, $bar: String) {
 *     someField(foo: $foo, bar: $bar) {
 *       a
 *       b {
 *         c
 *         d
 *       }
 *     }
 *   }
 *
 * Becomes:
 *
 *   query SomeQuery($foo:String!$bar:String){someField(foo:$foo bar:$bar){a b{c d}}}
 *
 * SDL example:
 *
 *   """
 *   Type description
 *   """
 *   type Foo {
 *     """
 *     Field description
 *     """
 *     bar: String
 *   }
 *
 * Becomes:
 *
 *   """Type description""" type Foo{"""Field description""" bar:String}
 *
 * @param {string | Source} source - GraphQL document text or a Source wrapper.
 * @returns {string} The document with all ignored characters removed.
 * @throws {TypeError} If `source` is neither a string nor a Source instance.
 */
function stripIgnoredCharacters(source) {
  var sourceObj = typeof source === 'string' ? new _source.Source(source) : source;

  if (!(sourceObj instanceof _source.Source)) {
    // BUG FIX: this message string previously contained a raw line break,
    // which is a syntax error inside a JavaScript string literal.
    throw new TypeError("Must provide string or Source. Received: ".concat((0, _inspect.default)(sourceObj)));
  }

  var body = sourceObj.body;
  var lexer = (0, _lexer.createLexer)(sourceObj);
  var strippedBody = '';
  var wasLastAddedTokenNonPunctuator = false;

  // Walk the token stream; lexing (rather than string munging) guarantees
  // that only truly ignored characters are dropped.
  while (lexer.advance().kind !== _tokenKind.TokenKind.EOF) {
    var currentToken = lexer.token;
    var tokenKind = currentToken.kind;

    /**
     * Every two non-punctuator tokens should have space between them.
     * Also prevent case of non-punctuator token following by spread resulting
     * in invalid token (e.g. `1...` is invalid Float token).
     */
    var isNonPunctuator = !(0, _lexer.isPunctuatorToken)(currentToken);

    if (wasLastAddedTokenNonPunctuator) {
      if (isNonPunctuator || currentToken.kind === _tokenKind.TokenKind.SPREAD) {
        strippedBody += ' ';
      }
    }

    // Copy the token's exact original text (preserves literal formatting).
    var tokenBody = body.slice(currentToken.start, currentToken.end);

    if (tokenKind === _tokenKind.TokenKind.BLOCK_STRING) {
      strippedBody += dedentBlockString(tokenBody);
    } else {
      strippedBody += tokenBody;
    }

    wasLastAddedTokenNonPunctuator = isNonPunctuator;
  }

  return strippedBody;
}

/**
 * Removes common indentation from a raw block-string token so it can be
 * re-emitted in minimal form, while keeping the parsed value identical.
 *
 * @param {string} blockStr - The full token text including `"""` delimiters.
 * @returns {string} An equivalent, minimally-formatted block string.
 */
function dedentBlockString(blockStr) {
  // skip leading and trailing triple quotations
  var rawStr = blockStr.slice(3, -3);
  var body = (0, _blockString.dedentBlockStringValue)(rawStr);
  var lines = body.split(/\r\n|[\n\r]/g);

  // If the dedented body still starts with indentation, re-add a leading
  // newline so that re-parsing dedents it the same way and yields the same value.
  if ((0, _blockString.getBlockStringIndentation)(lines) > 0) {
    body = '\n' + body;
  }

  var lastChar = body[body.length - 1];
  // A trailing unescaped `"` (or trailing `\`) would merge with the closing
  // `"""` delimiter, so separate it with a newline.
  var hasTrailingQuote = lastChar === '"' && body.slice(-4) !== '\\"""';

  if (hasTrailingQuote || lastChar === '\\') {
    body += '\n';
  }

  return '"""' + body + '"""';
}