/**
 * Formats the time gap as a string, using the specified format. Padding the left hand side of
 * numbers with zeroes is optional.
 *
 * <p>This method formats durations using the days and lower fields of the format pattern. Months
 * and larger are not used.
 *
 * <p>NOTE(review): all per-unit fields are {@code int}. When the pattern omits larger units
 * (e.g. no 'd' token), a sufficiently large duration overflows the corresponding field — most
 * visibly {@code milliseconds = (int) durationMillis}. Later Commons Lang versions widen these
 * to {@code long}; adopting that here requires confirming the signature of the private
 * {@code format(...)} helper first.
 *
 * @param durationMillis the duration to format
 * @param format the way in which to format the duration
 * @param padWithZeros whether to pad the left hand side of numbers with 0's
 * @return the time as a String
 */
public static String formatDuration(long durationMillis, String format, boolean padWithZeros) {
  // Tokenize the pattern once; a unit is only extracted if the pattern references it.
  Token[] tokens = lexx(format);
  int days = 0;
  int hours = 0;
  int minutes = 0;
  int seconds = 0;
  int milliseconds = 0;
  // Peel units off largest-first so each smaller field holds only the remainder.
  if (Token.containsTokenWithValue(tokens, d)) {
    days = (int) (durationMillis / DateUtils.MILLIS_PER_DAY);
    durationMillis = durationMillis - (days * DateUtils.MILLIS_PER_DAY);
  }
  if (Token.containsTokenWithValue(tokens, H)) {
    hours = (int) (durationMillis / DateUtils.MILLIS_PER_HOUR);
    durationMillis = durationMillis - (hours * DateUtils.MILLIS_PER_HOUR);
  }
  if (Token.containsTokenWithValue(tokens, m)) {
    minutes = (int) (durationMillis / DateUtils.MILLIS_PER_MINUTE);
    durationMillis = durationMillis - (minutes * DateUtils.MILLIS_PER_MINUTE);
  }
  if (Token.containsTokenWithValue(tokens, s)) {
    seconds = (int) (durationMillis / DateUtils.MILLIS_PER_SECOND);
    durationMillis = durationMillis - (seconds * DateUtils.MILLIS_PER_SECOND);
  }
  if (Token.containsTokenWithValue(tokens, S)) {
    milliseconds = (int) durationMillis;
  }
  return format(tokens, 0, 0, days, hours, minutes, seconds, milliseconds, padWithZeros);
}
/**
 * Returns a valid admin token, reusing the cached one while it has not expired and otherwise
 * re-authenticating against the auth service.
 *
 * @return the admin token string, or {@code null} if authentication failed
 */
public String getAdminToken() {
  // Reuse the cached token while it is still valid.
  String adminToken =
      curAdminToken != null && curAdminToken.isValid() ? curAdminToken.getToken() : null;
  if (adminToken == null) {
    // Cache miss or expired: POST credentials to the token endpoint.
    final ServiceClientResponse<AuthenticateResponse> serviceResponse =
        client.post(authUrl + "/tokens", requestBody, MediaType.APPLICATION_XML_TYPE);
    switch (HttpStatusCode.fromInt(serviceResponse.getStatusCode())) {
      case OK:
        final AuthenticateResponse authenticateResponse =
            marshaller.unmarshall(serviceResponse.getData(), AuthenticateResponse.class);
        Token token = authenticateResponse.getToken();
        curAdminToken = new AdminToken(token.getId(), token.getExpires().toGregorianCalendar());
        adminToken = curAdminToken.getToken();
        break;
      default:
        // Any non-OK status invalidates the cache so the next call retries.
        LOG.error(
            "Unable to get admin token. Verify admin credentials. "
                + serviceResponse.getStatusCode());
        curAdminToken = null;
        break;
    }
  }
  return adminToken;
}
/**
 * Gets the next token from a tokenizer and converts it to a string.
 *
 * @return The next token in the stream, as a string.
 * @throws TextParseException The input was invalid or not a string.
 * @throws IOException An I/O error occurred.
 */
public String getString() throws IOException {
  Token token = get();
  if (token.isString()) {
    return token.value;
  }
  throw exception("expected a string");
}
/**
 * Renders the sentence content and annotations: a begin marker, each token's string form in
 * order, then an end marker.
 *
 * @return the formatted sentence
 */
@Override
public String toString() {
  // StringBuilder instead of StringBuffer: no synchronization is needed for a local.
  StringBuilder sb = new StringBuilder();
  sb.append(" SENTENCE begin\n");
  for (Token tok : tokens) {
    sb.append(tok.toString());
  }
  sb.append(" SENTENCE end\n");
  return sb.toString();
}
/**
 * Consumes one token from the input, verifying it has the expected kind.
 * (JavaCC-generated parser plumbing.)
 *
 * @param i the expected token kind
 * @return the consumed token
 * @throws ParseException if the next token's kind is not {@code i}
 */
private final Token jj_consume_token(int i) throws ParseException {
  // Remember the current token so we can roll back on a kind mismatch.
  Token token1 = token;
  if (token1.next != null) {
    // A token was already buffered by lookahead; just advance.
    token = token.next;
  } else {
    // No buffered token: pull the next one from the token source and link it in.
    Token token2 = token;
    Token token3 = token_source.getNextToken();
    token2.next = token3;
    token = token3;
  }
  // Invalidate the cached "next token kind" used by the lookahead helpers.
  jj_ntk = -1;
  if (token.kind == i) {
    jj_gen = jj_gen + 1;
    return token;
  } else {
    // Wrong kind: restore the previous position and record the expected kind
    // before raising the generated parse error.
    token = token1;
    jj_kind = i;
    throw generateParseException();
  }
}
/**
 * Modifies the passed-in token list to start at the specified offset. For example, if the token
 * list covered positions 20-60 in the document (inclusive) like so:
 *
 * <pre>
 *   [token1] -> [token2] -> [token3] -> [token4]
 *   20     30  31      40  41      50  51     60
 * </pre>
 *
 * and you used this method to make the token list start at position 44, then the token list would
 * be modified to be the following:
 *
 * <pre>
 *   [part-of-old-token3] -> [token4]
 *   44                 50  51     60
 * </pre>
 *
 * Tokens that come before the specified position are forever lost, and the token containing that
 * position is made to begin at that position if necessary. All token types remain the same as
 * they were originally.
 *
 * <p>This method can be useful if you are only interested in part of a token list (i.e., the line
 * it represents), but you don't want to modify the token list yourself.
 *
 * @param tokenList The list to make start at the specified position. This parameter is modified.
 * @param pos The position at which the new token list is to start. If this position is not in the
 *     passed-in token list, returned token list will either be <code>null</code> or the
 *     unpaintable token(s) at the end of the passed-in token list.
 * @param e How to expand tabs.
 * @param textArea The text area from which the token list came.
 * @param x0 The initial x-pixel position of the old token list.
 * @return The width, in pixels, of the part of the token list "removed from the front." This way,
 *     you know the x-offset of the "new" token list.
 */
public static float makeTokenListStartAt(
    Token tokenList, int pos, TabExpander e, final RSyntaxTextArea textArea, float x0) {

  Token t = tokenList;

  // Loop through the token list until you find the one that contains
  // pos. Remember the cumulative width of all of these tokens.
  while (t != null && t.isPaintable() && !t.containsPosition(pos)) {
    x0 += t.getWidth(textArea, e, x0);
    t = t.getNextToken();
  }

  // Make the token that contains pos start at pos.
  if (t != null && t.isPaintable() && t.offset != pos) {
    // Number of chars between p0 and token start.
    int difference = pos - t.offset;
    // NOTE(review): the width added here covers textCount - difference + 1 chars;
    // confirm against Token.getWidthUpTo's contract (inclusive vs. exclusive index)
    // that the "+ 1" is intentional.
    x0 += t.getWidthUpTo(t.textCount - difference + 1, textArea, e, x0);
    t.makeStartAt(pos);
  }

  // Make the passed-in token list point to the proper place.
  // t can be null, for example, if line ends with unended MLC.
  if (t != null && t.isPaintable()) tokenList.copyFrom(t);
  else tokenList = null; // NOTE(review): only clears the local reference; callers are unaffected.
  t = null;

  // Return the x-offset (in pixels) of the newly-modified t.
  return x0;
}
/**
 * Comet-style long-poll endpoint: resolves (or creates) the Token for this channel/session,
 * delivers a pending message immediately if one is queued, and otherwise suspends the request
 * on a Jetty Continuation until a message arrives or the timeout fires.
 *
 * @param request carries "channelid" and "token" parameters; the resolved Token is cached in
 *     the "TOKEN" request attribute so continuation re-dispatches skip the lookup
 * @param response receives either a queued message or a {status: 'closed'} marker
 */
protected void doGet(HttpServletRequest request, HttpServletResponse response)
    throws ServletException, IOException {
  String channelid = request.getParameter("channelid");
  String tokenId = request.getParameter("token");

  // check if token exists in the request attribute
  Session session = null;
  Token token = (Token) request.getAttribute("TOKEN");
  if (token == null) {
    // First dispatch of this request: resolve the token from the session,
    // creating it on first contact, and cache it for later re-dispatches.
    session = sockets.getSession(channelid);
    token = session.findToken(tokenId);
    if (token == null) token = session.add(tokenId);
    request.setAttribute("TOKEN", token);
  } else {
    session = token.getSession();
  }

  if (token.hasMessages()) {
    // A message is already queued: answer immediately.
    String msg = token.getMessage();
    writeMessage(response, msg);
  } else {
    Continuation continuation = ContinuationSupport.getContinuation(request);
    if (continuation.isInitial()) {
      // No chat in queue, so suspend and wait for timeout or chat
      continuation.setTimeout(20000);
      continuation.suspend();
      token.setContinuation(continuation);
    } else {
      // Re-dispatched after timeout with still no messages: tell the client to close.
      writeMessage(response, "{status: 'closed'}");
    }
  }
}
/**
 * Installs the current token's value as the left operand of the preceding expression.
 *
 * @param ctx the current parse context
 * @return the number of tokens consumed (always 1)
 * @throws InvalidQueryException declared for interface compatibility
 */
@Override
public int _handleToken(ParseContext ctx) throws InvalidQueryException {
  Token[] tokens = ctx.getTokens();
  Token current = tokens[ctx.getCurrentTokensIndex()];
  ctx.getPrecedingExpression().setLeftOperand(current.getValue());
  return 1;
}
/**
 * Returns a singular variant of the given token: a token whose effective pattern is the glob
 * pattern is replaced by an equivalent token using the single-segment star pattern; any other
 * token is returned unchanged.
 */
private final Token makeTokenSingular(Token token) {
  if (!Segment.GLOB_PATTERN.equals(token.getEffectivePattern())) {
    return token;
  }
  return new Token(token.getParameterName(), token.getOriginalPattern(), Segment.STAR_PATTERN);
}
/**
 * Creates a table-partition node, recording the partition fields, size/limit bounds and the
 * source position of the opening token.
 *
 * @param start the token marking where this construct begins (used for position info)
 * @param fields the partition field names
 * @param size the partition size
 * @param limit the partition limit
 */
public NTablePartition(Token start, TString[] fields, TInt size, TInt limit) {
  // Record the source location of the opening token first, then the payload.
  this.startLine = start.startLine();
  this.startCol = start.startCol();
  this.fields = fields;
  this.size = size;
  this.limit = limit;
}
/**
 * Handles a relational-operator token: builds a RelationalExpression for the operator, then
 * delegates to operand handlers for the property operand and an optional value operand, and
 * finally requires a closing bracket to follow.
 *
 * @param ctx the current parse context; its token index is advanced past the operands by the
 *     delegated handlers
 * @return the number of tokens consumed by this handler itself (always 1)
 * @throws InvalidQueryException if the closing bracket is missing
 */
@Override
public int _handleToken(ParseContext ctx) throws InvalidQueryException {
  Token[] tokens = ctx.getTokens();
  int idx = ctx.getCurrentTokensIndex();
  Token token = tokens[idx];

  // Create the expression for this operator and advance past the operator token.
  RelationalOperator relationalOp = RelationalOperatorFactory.createOperator(token.getValue());
  ctx.addExpression(new RelationalExpression(relationalOp));
  ctx.setCurrentTokensIndex(++idx);

  // Left-hand side: the property operand (the handler advances the context index).
  TokenHandler propertyHandler = new PropertyOperandTokenHandler();
  propertyHandler.handleToken(ctx);

  // handle right operand if applicable to operator
  idx = ctx.getCurrentTokensIndex();
  if (ctx.getCurrentTokensIndex() < tokens.length
      && tokens[idx].getType().equals(Token.TYPE.VALUE_OPERAND)) {
    TokenHandler valueHandler = new ValueOperandTokenHandler();
    valueHandler.handleToken(ctx);
  }

  // skip closing bracket
  idx = ctx.getCurrentTokensIndex();
  if (idx >= tokens.length || tokens[idx].getType() != Token.TYPE.BRACKET_CLOSE) {
    throw new InvalidQueryException("Missing closing bracket for in expression.");
  }
  return 1;
}
/**
 * Consume and return the {@linkplain #getCurrentToken current symbol}.
 *
 * <p>E.g., given the following input with {@code A} being the current lookahead symbol, this
 * function moves the cursor to {@code B} and returns {@code A}.
 *
 * <pre>
 *  A B
 *  ^
 * </pre>
 *
 * If the parser is not in error recovery mode, the consumed symbol is added to the parse tree
 * using {@link ParserRuleContext#addChild(Token)}, and {@link ParseTreeListener#visitTerminal} is
 * called on any parse listeners. If the parser <em>is</em> in error recovery mode, the consumed
 * symbol is added to the parse tree using {@link ParserRuleContext#addErrorNode(Token)}, and
 * {@link ParseTreeListener#visitErrorNode} is called on any parse listeners.
 */
public Token consume() {
  Token o = getCurrentToken();
  // Never consume past EOF; EOF remains the current token forever.
  if (o.getType() != EOF) {
    getInputStream().consume();
  }
  boolean hasListener = _parseListeners != null && !_parseListeners.isEmpty();
  // Tree building / listener notification is skipped entirely when neither is wanted.
  if (_buildParseTrees || hasListener) {
    if (_errHandler.inErrorRecoveryMode(this)) {
      // In recovery: record the token as an error node and notify error listeners.
      ErrorNode node = _ctx.addErrorNode(o);
      if (_parseListeners != null) {
        for (ParseTreeListener listener : _parseListeners) {
          listener.visitErrorNode(node);
        }
      }
    } else {
      // Normal case: add a terminal node and notify terminal listeners.
      TerminalNode node = _ctx.addChild(o);
      if (_parseListeners != null) {
        for (ParseTreeListener listener : _parseListeners) {
          listener.visitTerminal(node);
        }
      }
    }
  }
  return o;
}
/**
 * Formats the time gap as a string, using the specified format. Padding the left hand side of
 * numbers with zeroes is optional.
 *
 * <p>Only the day field and smaller of the format pattern are honoured; months and larger units
 * are never produced.
 *
 * @param durationMillis the duration to format
 * @param format the way in which to format the duration, not null
 * @param padWithZeros whether to pad the left hand side of numbers with 0's
 * @return the formatted duration, not null
 * @throws java.lang.IllegalArgumentException if durationMillis is negative
 */
public static String formatDuration(
    final long durationMillis, final String format, final boolean padWithZeros) {
  Validate.inclusiveBetween(
      0, Long.MAX_VALUE, durationMillis, "durationMillis must not be negative");

  final Token[] tokens = lexx(format);

  // Extract units largest-first; 'remaining' always holds what is left to distribute,
  // and a unit is only split off when the pattern actually references it.
  long remaining = durationMillis;
  long days = 0;
  long hours = 0;
  long minutes = 0;
  long seconds = 0;
  if (Token.containsTokenWithValue(tokens, d)) {
    days = remaining / DateUtils.MILLIS_PER_DAY;
    remaining -= days * DateUtils.MILLIS_PER_DAY;
  }
  if (Token.containsTokenWithValue(tokens, H)) {
    hours = remaining / DateUtils.MILLIS_PER_HOUR;
    remaining -= hours * DateUtils.MILLIS_PER_HOUR;
  }
  if (Token.containsTokenWithValue(tokens, m)) {
    minutes = remaining / DateUtils.MILLIS_PER_MINUTE;
    remaining -= minutes * DateUtils.MILLIS_PER_MINUTE;
  }
  if (Token.containsTokenWithValue(tokens, s)) {
    seconds = remaining / DateUtils.MILLIS_PER_SECOND;
    remaining -= seconds * DateUtils.MILLIS_PER_SECOND;
  }

  // Whatever is left over is the millisecond component.
  return format(tokens, 0, 0, days, hours, minutes, seconds, remaining, padWithZeros);
}
/** Verifies that a standard OAuth 1.0a response yields both the token and its secret. */
@Test
public void shouldExtractTokenFromOAuthStandardResponse() {
  // Canonical token response as shown in the OAuth 1.0a examples.
  final String response = "oauth_token=hh5s93j4hdidpola&oauth_token_secret=hdhd0244k9j7ao03";

  final Token token = extractor.extract(response);

  assertEquals("hh5s93j4hdidpola", token.getToken());
  assertEquals("hdhd0244k9j7ao03", token.getSecret());
}
/**
 * Renders a token for display: tokens that carry a payload print their string value; all
 * others print their kind name in lower case.
 */
public String toString(Token t) {
  return t.getKind().hasPayload() ? t.stringValue() : t.kind.toString().toLowerCase();
}
/**
 * parse the String message
 *
 * @return SIPHeader (Event object)
 * @throws SIPParseException if the message does not respect the spec.
 */
public SIPHeader parse() throws ParseException {
  if (debug) dbg_enter("EventParser.parse");
  try {
    // "Event" header name, then optional whitespace.
    headerName(TokenTypes.EVENT);
    this.lexer.SPorHT();

    Event event = new Event();

    // The event type is a single ID token (e.g. "presence").
    this.lexer.match(TokenTypes.ID);
    Token token = lexer.getNextToken();
    String value = token.getTokenValue();
    event.setEventType(value);

    // Parse any header parameters, then require end-of-line.
    super.parse(event);
    this.lexer.SPorHT();
    this.lexer.match('\n');
    return event;
  } catch (ParseException ex) {
    // Re-wrap so the error message carries this parser's context.
    throw createParseException(ex.getMessage());
  } finally {
    if (debug) dbg_leave("EventParser.parse");
  }
}
/**
 * Convert an arithmetic expression in infix notation (e.g 1+2) to postfix notation (e.g 12+).
 * Only simple expressions not containing brackets are supported. <br>
 * A description of the algorithm used to accomplish this task can be found at:
 * http://scriptasylum.com/tutorials/infix_postfix/algorithms/infix-postfix/index.htm <br>
 * The list of {@code Token}s is scanned from left to right; operands (numbers) go straight to
 * the postfix list. For an operator, every operator on the {@code Stack} with a higher or equal
 * weight is popped to the postfix list (so heavier operators are evaluated first), and then the
 * current operator is pushed.
 *
 * @param tokenizer The {@code Tokenizer} which will be converted to postfix. It must not be
 *     {@code null}.
 * @return The resulting {@code List} of {@code Token}s in postfix order.
 */
private static List<Token> postfix(Tokenizer tokenizer) {
  Stack<Token> stack = new Stack<>();
  List<Token> postfix = new LinkedList<>();
  // True when t1 (the stack top) weighs at least as much as t2 (the incoming operator).
  BiPredicate<Token, Token> hasHigherOrEqualWeight =
      (t1, t2) ->
          Operators.getOpWeight(t1.getOperator()) - Operators.getOpWeight(t2.getOperator()) >= 0;
  while (tokenizer.hasNext()) {
    Token t = tokenizer.next();
    switch (t.getType()) {
      case NUMBER:
        postfix.add(t);
        break;
      case OPERATOR:
        // Pop every operator of higher-or-equal weight, then push the current one.
        // (The previous if/else wrapper around this loop was redundant: when the
        // stack is empty or the top is lighter, the loop body simply never runs
        // and both branches ended in the same push.)
        while (!stack.isEmpty() && hasHigherOrEqualWeight.test(stack.peek(), t)) {
          postfix.add(stack.pop());
        }
        stack.push(t);
        break;
      case OPENING_BRACKET:
      case CLOSING_BRACKET:
        throw new IllegalArgumentException(
            "Cannot create postfix expression if the source contains brackets.");
    }
  }
  // Flush the remaining operators in stack order.
  while (!stack.isEmpty()) {
    postfix.add(stack.pop());
  }
  return postfix;
}
/**
 * Checks whether the current position starts a single-line ("//") comment; if so, also marks
 * the tokenizer as being inside a comment.
 *
 * <p>(The historical "Singe" spelling of the name is kept: callers depend on it.)
 */
private boolean isSingeLineComment() {
  boolean startsComment =
      m_CurrChar.getString().equals("/") && m_NextChar.getString().equals("/");
  if (startsComment) {
    m_InComment = true;
  }
  return startsComment;
}
/**
 * Returns the bounding box (in the current view) of a specified position in the model. This
 * method is designed for line-wrapped views to use, as it allows you to specify a "starting
 * position" in the line, from which the x-value is assumed to be zero. The idea is that you
 * specify the first character in a physical line as <code>p0</code>, as this is the character
 * where the x-pixel value is 0.
 *
 * @param textArea The text area containing the text.
 * @param s A segment in which to load the line. This is passed in so we don't have to reallocate
 *     a new <code>Segment</code> for each call.
 * @param p0 The starting position in the physical line in the document.
 * @param p1 The position for which to get the bounding box in the view.
 * @param e How to expand tabs.
 * @param rect The rectangle whose x- and width-values are changed to represent the bounding box
 *     of <code>p1</code>. This is reused to keep from needlessly reallocating Rectangles.
 * @param x0 The x-coordinate (pixel) marking the left-hand border of the text. This is useful if
 *     the text area has a border, for example.
 * @return The bounding box in the view of the character <code>p1</code>.
 * @throws BadLocationException If <code>p0</code> or <code>p1</code> is not a valid location in
 *     the specified text area's document.
 * @throws IllegalArgumentException If <code>p0</code> and <code>p1</code> are not on the same
 *     line.
 */
public static Rectangle getLineWidthUpTo(
    RSyntaxTextArea textArea, Segment s, int p0, int p1, TabExpander e, Rectangle rect, int x0)
    throws BadLocationException {

  RSyntaxDocument doc = (RSyntaxDocument) textArea.getDocument();

  // Ensure p0 and p1 are valid document positions.
  if (p0 < 0) throw new BadLocationException("Invalid document position", p0);
  else if (p1 > doc.getLength()) throw new BadLocationException("Invalid document position", p1);

  // Ensure p0 and p1 are in the same line, and get the start/end
  // offsets for that line.
  Element map = doc.getDefaultRootElement();
  int lineNum = map.getElementIndex(p0);
  // We do ">1" because p1 might be the first position on the next line
  // or the last position on the previous one.
  // if (lineNum!=map.getElementIndex(p1))
  if (Math.abs(lineNum - map.getElementIndex(p1)) > 1)
    throw new IllegalArgumentException(
        "p0 and p1 are not on the " + "same line (" + p0 + ", " + p1 + ").");

  // Get the token list.
  Token t = doc.getTokenListForLine(lineNum);

  // Modify the token list 't' to begin at p0 (but still have correct
  // token types, etc.), and get the x-location (in pixels) of the
  // beginning of this new token list.
  // NOTE(review): the "removed width" return value is discarded and the x0 method
  // parameter is used below instead — confirm that is intentional.
  makeTokenListStartAt(t, p0, e, textArea, 0);

  rect = t.listOffsetToView(textArea, e, p1, x0, rect);
  return rect;
}
/**
 * Returns the next token from the stream.
 *
 * <p>Precedence: (1) tokens queued in {@code m_NextNext} by earlier lookahead, (2) an
 * immutable token at the current position, (3) a regular token built by {@code doNext()}
 * after skipping trimmable characters.
 *
 * <p>NOTE(review): end-of-stream (and any other failure in the read path) is signalled by an
 * exception; the catch block turns whatever has accumulated in {@code m_Ret} into a final
 * trimmed token. This is exception-driven control flow — confirm which exception types are
 * actually expected here before refactoring.
 */
public Token next() {
  try {
    // 1) Serve tokens queued by lookahead first.
    if (m_NextNext.isEmpty() == false) {
      Token next = m_NextNext.get(0);
      m_NextNext.remove(0);
      m_Ret = new StringBuilder();
      return next;
    }
    // 2) An immutable token at the cursor is returned as-is.
    if (m_CurrChar.isImmutable()) {
      Token ret1 = m_CurrChar;
      read();
      m_Ret = new StringBuilder();
      return ret1;
    }
    // Skip trimmable characters, then re-check for an immutable token.
    while (shouldTrim(m_CurrChar.getString())) read();
    if (m_CurrChar.isImmutable()) {
      Token ret1 = m_CurrChar;
      read();
      m_Ret = new StringBuilder();
      return ret1;
    }
    // 3) Build a regular token, then skip trailing trimmable characters.
    Token ret = doNext();
    while (shouldTrim(m_CurrChar.getString())) read();
    m_Ret = new StringBuilder();
    return ret;
  } catch (Exception ex) {
    // Read path failed (typically end of stream): flush the accumulator as a last token.
    m_EndOfStream = true;
    if (m_Ret == null) m_Ret = new StringBuilder();
    String ret_string = m_Ret.toString();
    Token ret = new Token(ret_string.trim());
    return ret;
  }
}
/**
 * Advances a token to its next element in the process graph and dispatches on the element
 * type: tasks enqueue the token for their resource variant, gateways recurse to pick a flow,
 * parallel gateways fork/join via MultiTokens, and end events finish the token.
 *
 * @param element the element the token is currently at
 * @param token the token being moved
 * @param processElementItself when true, {@code element} itself becomes the next element
 *     instead of following one of its outgoing flows
 */
private void chooseTokenFlow(BaseElement element, Token token, boolean processElementItself) {
  if (processElementItself) {
    nextElement = element;
  } else {
    if (element.getOutFlows().size() > 1) {
      // Several outgoing flows: pick one according to the configured probabilities.
      chooseFlowBasedOnPropablity(
          element,
          new IChoosenFlow() {
            @Override
            public void process(SequenceFlow flow) {
              nextElement = flow.getTarget();
            }
          });
    } else {
      nextElement = element.getOutFlows().get(0).getTarget();
    }
  }
  logger.debug("Chosen flow: " + nextElement.getName());
  if (nextElement instanceof Task) {
    // Tasks: bind the token to the task and queue it for the task's resource variant.
    Task targetTask = (Task) nextElement;
    token.task = targetTask;
    ResourceTypeVariant resourceTypeVariant = targetTask.getVariant();
    tokensWaiting.get(resourceTypeVariant).add(token);
  } else if (nextElement instanceof Gateway) {
    // NOTE(review): if ParallelGateway is a subtype of Gateway, this branch shadows the
    // ParallelGateway branch below and the fork/join logic never runs — confirm the
    // type hierarchy.
    chooseTokenFlow(nextElement, token);
  } else if (nextElement instanceof ParallelGateway) {
    ParallelGateway parallelGateway = (ParallelGateway) nextElement;
    if (parallelGateway.isDiverging()) {
      // Fork: spawn one MultiToken per outgoing flow, all referencing the original.
      for (SequenceFlow flow : parallelGateway.getOutFlows()) {
        MultiToken subToken = new MultiToken();
        subToken.originalToken = token;
        chooseTokenFlow(flow.getTarget(), subToken, true);
      }
    } else {
      // Join: count arriving sub-tokens per original; release the original once all
      // but the currently-arriving one have already been counted.
      Map<Token, Integer> arrivedTokensMap = arrivedTokens.get(parallelGateway);
      Token originalToken = ((MultiToken) token).originalToken;
      Integer arrivedTokensNumber = arrivedTokensMap.get(originalToken);
      if (arrivedTokensNumber == null) {
        arrivedTokensNumber = Integer.valueOf(0);
      }
      if (arrivedTokensNumber == parallelGateway.getInFlows().size() - 1) {
        chooseTokenFlow(parallelGateway, originalToken);
      } else {
        arrivedTokensMap.put(originalToken, arrivedTokensNumber + 1);
      }
    }
  } else if (nextElement instanceof EndEvent) {
    // End event: stamp the finish time and log the token's total transit time.
    token.endTime = currentTime;
    tokensFinished++;
    logger.debug(
        tokensFinished
            + ". token finished at "
            + currentTime
            + " in "
            + (token.endTime - token.startTime));
  }
}
/** Renders each token on its own line together with its underlying string. */
@Override
public String toString() {
  final StringBuilder out = new StringBuilder();
  for (final Token token : tokens) {
    out.append(token.toString());
    out.append(" String:");
    out.append(token.getString());
    out.append("\n");
  }
  return out.toString();
}
/** Return a token from this source; i.e., match a token on the char stream. */
public Token nextToken() {
  // Keep matching until a real token is produced; skipped tokens restart the loop.
  while (true) {
    // Reset per-token state before attempting a match.
    state.token = null;
    state.channel = Token.DEFAULT_CHANNEL;
    state.tokenStartCharIndex = input.index();
    state.tokenStartCharPositionInLine = input.getCharPositionInLine();
    state.tokenStartLine = input.getLine();
    state.text = null;
    if (input.LA(1) == CharStream.EOF) {
      // Synthesize an EOF token carrying the current stream position.
      Token eof =
          new CommonToken(
              (CharStream) input, Token.EOF, Token.DEFAULT_CHANNEL, input.index(), input.index());
      eof.setLine(getLine());
      eof.setCharPositionInLine(getCharPositionInLine());
      return eof;
    }
    try {
      mTokens();
      if (state.token == null) {
        // A rule matched but did not emit explicitly; emit the default token.
        emit();
      } else if (state.token == Token.SKIP_TOKEN) {
        continue;
      }
      return state.token;
    } catch (NoViableAltException nva) {
      reportError(nva);
      recover(nva); // throw out current char and try again
    } catch (RecognitionException re) {
      reportError(re); // match() routine has already called recover()
    }
  }
}
/**
 * Verifies handling of an unencrypted self-issued SAML token whose signature cannot be
 * validated: both {@code isSignatureValid()} and {@code getClientDigest()} are expected to
 * throw.
 *
 * <p>NOTE(review): the digest assertEquals inside the second try block only runs if
 * getClientDigest does NOT throw — confirm whether that assertion is ever expected to
 * execute, given the test then requires {@code threw} to be true.
 */
public void testSelfIssuedToken3() throws InfoCardProcessingException, CryptoException {
  // A complete self-issued SAML 1.1 assertion: conditions, attribute statement, XML-DSig.
  String petitTokenStr =
      "<root><saml:Assertion xmlns:saml=\"urn:oasis:names:tc:SAML:1.0:assertion\" MajorVersion=\"1\" MinorVersion=\"1\" AssertionID=\"uuid-E94A4623-422A-D4ED-242A-C8893146A338\" Issuer=\"http://schemas.xmlsoap.org/ws/2005/05/identity/issuer/self\" IssueInstant=\"2008-04-02T17:07:15Z\"><saml:Conditions NotBefore=\"2008-04-02T17:02:15Z\" NotOnOrAfter=\"2008-04-02T17:17:15Z\"><saml:AudienceRestrictionCondition><saml:Audience>http://localhost:8080/relyingparty/</saml:Audience></saml:AudienceRestrictionCondition></saml:Conditions>"
          + "<saml:AttributeStatement><saml:Subject><saml:SubjectConfirmation><saml:ConfirmationMethod>urn:oasis:names:tc:SAML:1.0:cm:bearer</saml:ConfirmationMethod></saml:SubjectConfirmation></saml:Subject><saml:Attribute AttributeName=\"givenname\" AttributeNamespace=\"http://schemas.xmlsoap.org/ws/2005/05/identity/claims\"><saml:AttributeValue>Patrick</saml:AttributeValue></saml:Attribute><saml:Attribute AttributeName=\"surname\" AttributeNamespace=\"http://schemas.xmlsoap.org/ws/2005/05/identity/claims\"><saml:AttributeValue>Petit</saml:AttributeValue></saml:Attribute><saml:Attribute AttributeName=\"emailaddress\" AttributeNamespace=\"http://schemas.xmlsoap.org/ws/2005/05/identity/claims\"><saml:AttributeValue>[email protected]</saml:AttributeValue></saml:Attribute><saml:Attribute AttributeName=\"privatepersonalidentifier\" AttributeNamespace=\"http://schemas.xmlsoap.org/ws/2005/05/identity/claims\"><saml:AttributeValue>cG9FbmlTV2dVblQ4dHVRRElqSVpWRU1GdE9JUEUrZS9EcnVMb1ZUdGRrST0=</saml:AttributeValue></saml:Attribute></saml:AttributeStatement>"
          + "<dsig:Signature xmlns:dsig=\"http://www.w3.org/2000/09/xmldsig#\"><dsig:SignedInfo><dsig:CanonicalizationMethod Algorithm=\"http://www.w3.org/2001/10/xml-exc-c14n#\" /><dsig:SignatureMethod Algorithm=\"http://www.w3.org/2000/09/xmldsig#rsa-sha1\" /><dsig:Reference URI=\"#uuid-E94A4623-422A-D4ED-242A-C8893146A338\"><dsig:Transforms><dsig:Transform Algorithm=\"http://www.w3.org/2000/09/xmldsig#enveloped-signature\" /><dsig:Transform Algorithm=\"http://www.w3.org/2001/10/xml-exc-c14n#\" /></dsig:Transforms><dsig:DigestMethod Algorithm=\"http://www.w3.org/2000/09/xmldsig#sha1\" /><dsig:DigestValue>2ToPlLCRarT6Nda1hoY3kkVJSe0=</dsig:DigestValue></dsig:Reference></dsig:SignedInfo><dsig:SignatureValue>CRkt12uDe1kLxmdty6tugG4Yi3mcbefhVvNBggWKxLUFakTJ/7zz5L6BPmnrU+bs9+o7QhH8wYBt6KidtynbCtKY8SwlxMMc+8Qbu1r0uurS+UxGkN5p30QlomQ1BVjfKd5zmr3mKvNXZpVwqE9FG8343AfGGB3KpoRYAH9Ivk5BH1cF1EYaNNytF4WPmkdwkfXr5/kxyf526564XUFPrSmz86BTyksGZfD6D3UAHenps3IdfgpIzv1Y3wOLJADZdxHJxmBI7qZ31wIMAGUhkKUQGfmoe677ICkHBOPMyQszycIrR9FD87HzRKe6hhSc5h3DmQvuJ111KM7suSRwpA==</dsig:SignatureValue><dsig:KeyInfo><dsig:KeyValue><dsig:RSAKeyValue><dsig:Modulus>l6OIACU8lEN+m6XawDTJRHAZlaMSAcz0pgtxBoqtpxIdl1YjKJ4HyOz3rMlnOMk8n43Y5SLMu4p5G09Pr6gIz25FwOSctFtflvmGEHczScYvtEgjrybBE+nrWcrIORuqpgCJ1mbG0/GSFsClI70k5rgHtL7M9Zha3NyAQUUyUcbvpYrR04+BGkQwyrOP7g/l191laJizLtIuA/OJgjM5dhXt2hjMRUkDImQvW2L9U/UM5SvXp6ecVXkYnwVDtDDdjaV6p5jPY8HjJKtBGsvqCtYfjNWiCZL/Bw90/JMW7blqrAa42BviPl9/wIHpvRM4q2mYEZFL8mbwqRxSz9OYnQ==</dsig:Modulus><dsig:Exponent>AQAB</dsig:Exponent></dsig:RSAKeyValue></dsig:KeyValue></dsig:KeyInfo></dsig:Signature>"
          + "</saml:Assertion></root>";
  // "http://www.w3.org/2000/09/xmldsig#"
  Token token = new Token(petitTokenStr, null);
  assertFalse(token.isEncrypted());

  // Signature validation is expected to fail with InfoCardProcessingException.
  boolean threw = false;
  try {
    /*boolean falsch =*/ token.isSignatureValid();
  } catch (InfoCardProcessingException e) {
    threw = true;
  }
  assertTrue(threw);

  // Computing the client digest is likewise expected to throw.
  threw = false;
  try {
    String digest = token.getClientDigest();
    assertEquals("tAhEE404bgkTVqhLmqf0ZmNpsEE=", digest);
  } catch (CryptoException e) {
    threw = true;
  }
  assertTrue(threw);
}
/** * Parse a term. * * @param token the initial token. * @return the root of the generated parse subtree. * @throws Exception if an error occurred. */ private ICodeNode parseTerm(Token token) throws Exception { // Parse a factor and make its node the root node. ICodeNode rootNode = parseFactor(token); token = currentToken(); TokenType tokenType = token.getType(); // Loop over multiplicative operators. while (MULT_OPS.contains(tokenType)) { // Create a new operator node and adopt the current tree // as its first child. ICodeNodeType nodeType = MULT_OPS_OPS_MAP.get(tokenType); ICodeNode opNode = ICodeFactory.createICodeNode(nodeType); opNode.addChild(rootNode); token = nextToken(); // consume the operator // Parse another factor. The operator node adopts // the term's tree as its second child. opNode.addChild(parseFactor(token)); // The operator node becomes the new root node. rootNode = opNode; token = currentToken(); tokenType = token.getType(); } return rootNode; }
/**
 * Eat an identifier, possibly qualified (meaning that it is dotted). TODO AndyC Could create
 * complete identifiers (a.b.c) here rather than a sequence of them? (a, b, c)
 */
private SpelNodeImpl eatPossiblyQualifiedId() {
  LinkedList<SpelNodeImpl> qualifiedIdPieces = new LinkedList<SpelNodeImpl>();
  Token node = peekToken();
  // Consume a run of id/dot tokens; only the identifier pieces are kept (dots are skipped).
  while (isValidQualifiedId(node)) {
    nextToken();
    if (node.kind != TokenKind.DOT) {
      qualifiedIdPieces.add(new Identifier(node.stringValue(), toPos(node)));
    }
    node = peekToken();
  }
  if (qualifiedIdPieces.isEmpty()) {
    // Nothing consumed: either input ran out or an unexpected token was found.
    if (node == null) {
      raiseInternalException(expressionString.length(), SpelMessage.OOD);
    }
    raiseInternalException(
        node.startpos,
        SpelMessage.NOT_EXPECTED_TOKEN,
        "qualified ID",
        node.getKind().toString().toLowerCase());
  }
  // The resulting node spans from the first through the last identifier piece.
  int pos =
      toPos(
          qualifiedIdPieces.getFirst().getStartPosition(),
          qualifiedIdPieces.getLast().getEndPosition());
  return new QualifiedIdentifier(
      pos, qualifiedIdPieces.toArray(new SpelNodeImpl[qualifiedIdPieces.size()]));
}
/**
 * Parses a Content-Type SIP header: "type/subtype" plus any trailing parameters.
 *
 * @return the populated ContentType header
 * @throws ParseException if the header does not respect the grammar
 */
public SIPHeader parse() throws ParseException {
  ContentType contentType = new ContentType();
  if (debug) dbg_enter("ContentTypeParser.parse");

  try {
    this.headerName(TokenTypes.CONTENT_TYPE);

    // The type:
    lexer.match(TokenTypes.ID);
    Token type = lexer.getNextToken();
    this.lexer.SPorHT();
    contentType.setContentType(type.getTokenValue());

    // The sub-type:
    lexer.match('/');
    lexer.match(TokenTypes.ID);
    Token subType = lexer.getNextToken();
    this.lexer.SPorHT();
    contentType.setContentSubType(subType.getTokenValue());

    // Parse any ";name=value" parameters, then require end-of-line.
    super.parse(contentType);
    this.lexer.match('\n');
  } finally {
    if (debug) dbg_leave("ContentTypeParser.parse");
  }
  return contentType;
}
/**
 * Returns whether the next token is an identifier matching the given text
 * (case-insensitively), without consuming it.
 */
private boolean peekIdentifierToken(String identifierString) {
  if (moreTokens()) {
    Token token = peekToken();
    return token.kind == TokenKind.IDENTIFIER
        && token.stringValue().equalsIgnoreCase(identifierString);
  }
  return false;
}
/**
 * Writer loop: drains queued tokens, serializes each token's output message to the object
 * stream, then wakes the thread that submitted the token.
 */
@Override
public void run() {
  while (running) {
    // Busy-poll with a short sleep while the queue is empty.
    if (queue.size() == 0) {
      try {
        Thread.sleep(20);
      } catch (InterruptedException ex) {
        // empty — ignored; the loop re-checks 'running'.
        // NOTE(review): the interrupt status is swallowed here; consider
        // Thread.currentThread().interrupt().
      }
      continue;
    }
    Token aToken = queue.remove(0);
    try {
      objectOutput.writeObject(aToken.outputMsg);
      objectOutput.flush();
    } catch (IOException e) {
      onException(e);
    }
    /*
     * Notify the thread that submitted this Token that we have finished the
     * operations related to it.
     */
    synchronized (aToken) {
      aToken.notify();
    }
  } // end of the "while" loop
}
/**
 * Stores a geo point with the given coordinates and location metadata, links every supplied
 * token to it, and registers it in the point list.
 *
 * @return the newly stored point
 */
@Override
public GeoPoint addPoint(
    String longitude,
    String latitude,
    Double altitude,
    String continent,
    String country,
    String capital,
    List<Token> pointTokens) {
  final GeoPointStored point = new GeoPointStored();
  point.longitude = longitude;
  point.latitude = latitude;
  point.altitude = altitude;
  point.continent = continent;
  point.country = country;
  point.capital = capital;

  // Remember each token's id on the point and index the token -> point link.
  final int tokenCount = pointTokens.size();
  point.tokRefs = new String[tokenCount];
  for (int i = 0; i < tokenCount; i++) {
    final Token token = pointTokens.get(i);
    point.tokRefs[i] = token.getID();
    connector.token2ItsGeopoint.put(token, point);
  }

  points.add(point);
  return point;
}