#
#  This program source code file is part of KICAD, a free EDA CAD application.
#
#  Copyright (C) 2010 Wayne Stambaugh <stambaughw@verizon.net>
#  Copyright (C) 2010 Kicad Developers, see AUTHORS.txt for contributors.
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, you may find one here:
#  http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
#  or you may search the http://www.gnu.org website for the version 2 license,
#  or you may write to the Free Software Foundation, Inc.,
#  51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA
#
#
# This script converts a plain text file with a line feed separated list
# of token names into the appropriate source and header files required by
# the DSN lexer.  See files "<base_source_path>/common/dsnlexer.cpp" and
# "<base_source_path>/include/dsnlexer.h" for more information about how
# the DSN lexer works.  The token list file format requires a single token
# per line.  Tokens can only contain lower case letters, numbers, and
# underscores.  The first letter of each token must be a lower case letter.
# Tokens must be unique.  If any of the above criteria are not met, the
# source and header files will not be generated and a build error will
# occur.
#
# Valid tokens:    a a1 foo_1 foo_bar2
# Invalid tokens:  1 A _foo bar_ foO
#
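# For illustration only, a hypothetical "cmp_library.keywords" input file could
# contain nothing more than one token per line:
#
#     pin
#     name
#     value_text
#
# Processed with this script, that file would by default yield cmp_library_lexer.h
# and cmp_library_keywords.cpp (see the parameter descriptions below).
#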
# Invocation Parameters are:  enum, inputFile, outCppFile, outHeaderFile
#
#     enum       - Required, name of the enum to generate.
#
#     inputFile  - Required, name of the token list file, or "*.keywords" file.
#                  Choose the base file name carefully; it determines the class name
#                  used in the generated *_lexer.h file.
#
#     outCppFile - Optional, full path and file name of where to save the generated
#                  cpp keywords file.  If not defined, the output path is the same
#                  path as the token list file path, with a file name of *_keywords.cpp
#
#  outHeaderFile - Optional, full path and file name of where to save the generated
#                  *.h lexer file.  If not defined, the output path is the same
#                  path as the token list file path, with a file name of *_lexer.h
#
#
# Example usage from within a CMakeLists.txt file is shown below.  CMake itself
# is invoked as a child process to execute this script, and the parameters are
# passed on the command line, formulated as the "COMMAND" sequence below:
#
# add_custom_command(
#     OUTPUT  ${CMAKE_CURRENT_SOURCE_DIR}/cmp_library_lexer.h
#             ${CMAKE_CURRENT_SOURCE_DIR}/cmp_library_keywords.cpp
#     COMMAND ${CMAKE_COMMAND}
#             -Denum=YOURTOK_T
#             -DinputFile=${CMAKE_CURRENT_SOURCE_DIR}/cmp_library.keywords
#             -DoutCppFile=${CMAKE_CURRENT_SOURCE_DIR}/cmp_library_keywords.cpp
#             -DoutHeaderFile=${CMAKE_CURRENT_SOURCE_DIR}/cmp_library_lexer.h
#             -P ${CMAKE_MODULE_PATH}/TokenList2DsnLexer.cmake
#     DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/cmp_library.keywords
#     COMMENT "creating ${CMAKE_CURRENT_SOURCE_DIR}/cmp_library_{lexer.h,keywords.cpp}
#         from ${CMAKE_CURRENT_SOURCE_DIR}/cmp_library.keywords"
# )
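#
# For quick testing outside the build (illustrative paths and values only), the
# script can also be run directly in CMake script mode, for example:
#
#   cmake -Denum=YOURTOK_T \
#         -DinputFile=cmp_library.keywords \
#         -P TokenList2DsnLexer.cmake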

#message( STATUS "TokenList2DsnLexer.cmake" )    # indicate we are running

set( tokens "" )
set( lineCount 0 )
set( dsnErrorMsg "TokenList2DsnLexer.cmake failure:" )

if( NOT EXISTS ${inputFile} )
    message( FATAL_ERROR "${dsnErrorMsg} file ${inputFile} cannot be found." )
endif()

if( NOT DEFINED enum )
    message( FATAL_ERROR "${dsnErrorMsg} missing \"enum\" processing ${inputFile}." )
endif()

get_filename_component( outputPath "${inputFile}" PATH )

# The keywords file name without extension is important: it sets the lexer class
# name (via RESULT) and the default output file names.
get_filename_component( result "${inputFile}" NAME_WE )
string( TOUPPER "${result}" RESULT )

#message( "enum:'${enum}' result:'${result}' outputPath:'${outputPath}' inputFile:'${inputFile}'" )

if( NOT DEFINED outCppFile )
    set( outCppFile "${outputPath}/${result}_keywords.cpp" )
endif()

if( NOT DEFINED outHeaderFile )
    set( outHeaderFile "${outputPath}/${result}_lexer.h" )
endif()

# Create the include guard tag for the generated header file.
set( headerTag "_${RESULT}_H_" )

set( includeFileHeader
"
/* Do not modify this file; it was automatically generated by the
 * TokenList2DsnLexer CMake script.
 */

#ifndef ${headerTag}
#define ${headerTag}

#include \"dsnlexer.h\"

namespace DSN {

enum ${enum} {

    // these first few are negative special ones for syntax, and are
    // inherited from DSNLEXER.
    T_NONE = DSN_NONE,
    T_COMMENT = DSN_COMMENT,
    T_STRING_QUOTE = DSN_STRING_QUOTE,
    T_QUOTE_DEF = DSN_QUOTE_DEF,
    T_DASH = DSN_DASH,
    T_SYMBOL = DSN_SYMBOL,
    T_NUMBER = DSN_NUMBER,
    T_RIGHT = DSN_RIGHT,    // right bracket, ')'
    T_LEFT = DSN_LEFT,      // left bracket, '('
    T_STRING = DSN_STRING,  // a quoted string, stripped of the quotes
    T_EOF = DSN_EOF,        // special case for end of file

"
)

set( sourceFileHeader
"
/* Do not modify this file; it was automatically generated by the
 * TokenList2DsnLexer CMake script.
 *
 * Include this file in your lexer class to provide the keywords for
 * your DSN lexer.
 */

#include \"${result}_lexer.h\"

namespace DSN {

#define TOKDEF(x)    { #x, T_##x }

const KEYWORD ${result}_keywords[] = {
"
)

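# Read the token file into a list, one entry per line.  NO_HEX_CONVERSION keeps
# file( STRINGS ) from applying its automatic Intel hex / Motorola S-record decoding.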
file( STRINGS ${inputFile} tmpTokens NO_HEX_CONVERSION )

foreach( tmpToken ${tmpTokens} )
    math( EXPR lineCount "${lineCount} + 1" )

    string( STRIP "${tmpToken}" tmpToken )

    # Ignore empty lines.
    if( tmpToken )
        # Make sure token is valid.
        string( REGEX MATCH "[a-z][_0-9a-z]*[0-9a-z]$" validToken "${tmpToken}" )
        if( validToken STREQUAL tmpToken )
            list( APPEND tokens "${validToken}" )
        else( validToken STREQUAL tmpToken )
            message( FATAL_ERROR
                     "Invalid token string \"${tmpToken}\" at line ${lineCount} in file "
                     "<${inputFile}>." )
        endif( validToken STREQUAL tmpToken )
    endif( tmpToken )
endforeach( tmpToken ${tmpTokens} )

list( SORT tokens )

# Check for duplicates.
list( LENGTH tokens tokensBefore )
list( REMOVE_DUPLICATES tokens )
list( LENGTH tokens tokensAfter )

if( NOT ( tokensBefore EQUAL tokensAfter ) )
    message( FATAL_ERROR "Duplicate tokens found in file <${inputFile}>." )
endif( NOT ( tokensBefore EQUAL tokensAfter ) )

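# Write the fixed file headers first; the per-token lines are appended below.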
file( WRITE "${outHeaderFile}" "${includeFileHeader}" )
file( WRITE "${outCppFile}" "${sourceFileHeader}" )

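# Emit one enum entry per token into the header and one TOKDEF() entry per token
# into the cpp file.  The first enum entry is pinned to 0; every entry gets a
# trailing comma except the last.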
set( lineCount 1 )

foreach( token ${tokens} )
    if( lineCount EQUAL 1 )
        file( APPEND "${outHeaderFile}" "    T_${token} = 0" )
    else( lineCount EQUAL 1 )
        file( APPEND "${outHeaderFile}" "    T_${token}" )
    endif( lineCount EQUAL 1 )

    file( APPEND "${outCppFile}" "    TOKDEF( ${token} )" )

    if( lineCount EQUAL tokensAfter )
        file( APPEND "${outHeaderFile}" "\n" )
        file( APPEND "${outCppFile}" "\n" )
    else( lineCount EQUAL tokensAfter )
        file( APPEND "${outHeaderFile}" ",\n" )
        file( APPEND "${outCppFile}" ",\n" )
    endif( lineCount EQUAL tokensAfter )
    math( EXPR lineCount "${lineCount} + 1" )
endforeach( token ${tokens} )

file( APPEND "${outHeaderFile}"
"};

extern const KEYWORD  ${result}_keywords[];
extern const unsigned ${result}_keyword_count;

}   // End namespace DSN

using namespace DSN;    // enum ${enum} is in this namespace

class ${RESULT}_LEXER : public DSNLEXER
{
public:

    /**
     * Constructor ( const std::string&, const wxString& )
     * @param aSExpression is (utf8) text possibly from the clipboard that you want to parse.
     * @param aSource is a description of the origin of @a aSExpression, such as a filename.
     *   If left empty, then _(\"clipboard\") is used.
     */
    ${RESULT}_LEXER( const std::string& aSExpression, const wxString& aSource = wxEmptyString ) :
        DSNLEXER( DSN::${result}_keywords, DSN::${result}_keyword_count,
                  aSExpression, aSource )
    {
    }

    /**
     * Constructor ( FILE* )
     * takes @a aFile already opened for reading and @a aFilename as parameters.
     * The opened file is assumed to be positioned at the beginning of the file
     * for purposes of accurate line number reporting in error messages.  The
     * FILE is closed by this instance when its destructor is called.
     * @param aFile is a FILE already opened for reading.
     * @param aFilename is the name of the opened file, needed for error reporting.
     */
    ${RESULT}_LEXER( FILE* aFile, const wxString& aFilename ) :
        DSNLEXER( DSN::${result}_keywords, DSN::${result}_keyword_count,
                  aFile, aFilename )
    {
    }

    /**
     * Constructor ( LINE_READER* )
     * initializes a lexer and prepares to read from @a aLineReader, which
     * is assumed ready and may be in use by other DSNLEXERs as well.  No ownership
     * is taken of @a aLineReader, so it can be shared with other lexers.
     * The transition between grammars in such a case must happen on a text
     * line boundary, not within the same line of text.
     *
     * @param aLineReader is any subclassed instance of LINE_READER, such as
     *  STRING_LINE_READER or FILE_LINE_READER.  No ownership is taken of aLineReader.
     */
    ${RESULT}_LEXER( LINE_READER* aLineReader ) :
        DSNLEXER( DSN::${result}_keywords, DSN::${result}_keyword_count,
                  aLineReader )
    {
    }

    /**
     * Function NextTok
     * returns the next token found in the input file or T_EOF when reaching
     * the end of file.  Users should wrap this function to return an enum
     * to aid in grammar debugging while running under a debugger, but leave
     * this lower level function returning an int (so the enum does not collide
     * with another usage).
     * @return ${enum} - the type of token found next.
     * @throw IO_ERROR - only if the LINE_READER throws it.
     */
    ${enum} NextTok() throw( IO_ERROR )
    {
        return (${enum}) DSNLEXER::NextTok();
    }

    /**
     * Function NeedSYMBOL
     * calls NextTok() and then verifies that the token read in
     * satisfies bool IsSymbol().
     * If not, an IO_ERROR is thrown.
     * @return ${enum} - the actual token read in.
     * @throw IO_ERROR, if the next token does not satisfy IsSymbol()
     */
    ${enum} NeedSYMBOL() throw( IO_ERROR )
    {
        return (${enum}) DSNLEXER::NeedSYMBOL();
    }

    /**
     * Function NeedSYMBOLorNUMBER
     * calls NextTok() and then verifies that the token read in
     * satisfies bool IsSymbol() or tok==T_NUMBER.
     * If not, an IO_ERROR is thrown.
     * @return ${enum} - the actual token read in.
     * @throw IO_ERROR, if the next token does not satisfy the above test
     */
    ${enum} NeedSYMBOLorNUMBER() throw( IO_ERROR )
    {
        return (${enum}) DSNLEXER::NeedSYMBOLorNUMBER();
    }

    /**
     * Function CurTok
     * returns whatever NextTok() returned the last time it was called.
     */
    ${enum} CurTok()
    {
        return (${enum}) DSNLEXER::CurTok();
    }

    /**
     * Function PrevTok
     * returns whatever NextTok() returned the second to last time it was called.
     */
    ${enum} PrevTok()
    {
        return (${enum}) DSNLEXER::PrevTok();
    }
};

// example usage

/**
 * Class ${RESULT}_PARSER
 * holds data and functions pertinent to parsing an S-expression file.
 *
class ${RESULT}_PARSER : public ${RESULT}_LEXER
{

};
 */

#endif   // ${headerTag}
"
)

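# Close out the keyword table in the cpp file and emit the table element count.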
file( APPEND "${outCppFile}"
"};

const unsigned ${result}_keyword_count = unsigned( sizeof( ${result}_keywords )/sizeof( ${result}_keywords[0] ) );

}   // End namespace DSN
"
)