1 // script.cc -- handle linker scripts for gold.
13 #include "workqueue.h"
22 // A token read from a script file. We don't implement keywords here;
23 // all keywords are simply represented as a string.
28 // Token classification.
33 // Token indicates end of input.
35 // Token is a string of characters.
37 // Token is an operator.
39 // Token is a number (an integer).
43 // We need an empty constructor so that we can store Token objects in STL containers.
45 : classification_(TOKEN_INVALID
), value_(), opcode_(0),
46 lineno_(0), charpos_(0)
49 // A general token with no value.
50 Token(Classification classification
, int lineno
, int charpos
)
51 : classification_(classification
), value_(), opcode_(0),
52 lineno_(lineno
), charpos_(charpos
)
53 { assert(classification
== TOKEN_INVALID
|| classification
== TOKEN_EOF
); }
55 // A general token with a value.
56 Token(Classification classification
, const std::string
& value
,
57 int lineno
, int charpos
)
58 : classification_(classification
), value_(value
), opcode_(0),
59 lineno_(lineno
), charpos_(charpos
)
60 { assert(classification
!= TOKEN_INVALID
&& classification
!= TOKEN_EOF
); }
62 // A token representing a string of characters.
63 Token(const std::string
& s
, int lineno
, int charpos
)
64 : classification_(TOKEN_STRING
), value_(s
), opcode_(0),
65 lineno_(lineno
), charpos_(charpos
)
68 // A token representing an operator.
69 Token(int opcode
, int lineno
, int charpos
)
70 : classification_(TOKEN_OPERATOR
), value_(), opcode_(opcode
),
71 lineno_(lineno
), charpos_(charpos
)
74 // Return whether the token is invalid.
77 { return this->classification_
== TOKEN_INVALID
; }
79 // Return whether this is an EOF token.
82 { return this->classification_
== TOKEN_EOF
; }
84 // Return the token classification.
86 classification() const
87 { return this->classification_
; }
89 // Return the line number at which the token starts.
92 { return this->lineno_
; }
94 // Return the character position at which the token starts.
97 { return this->charpos_
; }
99 // Get the value of a token.
104 assert(this->classification_
== TOKEN_STRING
);
109 operator_value() const
111 assert(this->classification_
== TOKEN_OPERATOR
);
112 return this->opcode_
;
116 integer_value() const
118 assert(this->classification_
== TOKEN_INTEGER
);
119 return strtoll(this->value_
.c_str(), NULL
, 0);
123 // The token classification.
124 Classification classification_
;
125 // The token value, for TOKEN_STRING or TOKEN_INTEGER.
127 // The token value, for TOKEN_OPERATOR.
129 // The line number where this token started (one based).
131 // The character position within the line where this token started
136 // This class handles lexing a file into a sequence of tokens. We
137 // don't expect linker scripts to be large, so we just read them and
138 // tokenize them all at once.
143 Lex(Input_file
* input_file
)
144 : input_file_(input_file
), tokens_()
147 // Tokenize the file. Return the final token, which will be either
148 // an invalid token or an EOF token. An invalid token indicates
149 // that tokenization failed.
154 typedef std::vector
<Token
> Token_sequence
;
156 // Return the tokens.
157 const Token_sequence
&
159 { return this->tokens_
; }
163 Lex
& operator=(const Lex
&);
165 // Read the file into a string buffer.
167 read_file(std::string
*);
169 // Make a general token with no value at the current location.
171 make_token(Token::Classification c
, const char* p
) const
172 { return Token(c
, this->lineno_
, p
- this->linestart_
+ 1); }
174 // Make a general token with a value at the current location.
176 make_token(Token::Classification c
, const std::string
& v
, const char* p
)
178 { return Token(c
, v
, this->lineno_
, p
- this->linestart_
+ 1); }
180 // Make an operator token at the current location.
182 make_token(int opcode
, const char* p
) const
183 { return Token(opcode
, this->lineno_
, p
- this->linestart_
+ 1); }
185 // Make an invalid token at the current location.
187 make_invalid_token(const char* p
)
188 { return this->make_token(Token::TOKEN_INVALID
, p
); }
190 // Make an EOF token at the current location.
192 make_eof_token(const char* p
)
193 { return this->make_token(Token::TOKEN_EOF
, p
); }
195 // Return whether C can be the first character in a name. C2 is the
196 // next character, since we sometimes need that.
198 can_start_name(char c
, char c2
);
200 // Return whether C can appear in a name which has already started.
202 can_continue_name(char c
);
204 // Return whether C, C2, C3 can start a hex number.
206 can_start_hex(char c
, char c2
, char c3
);
208 // Return whether C can appear in a hex number.
210 can_continue_hex(char c
);
212 // Return whether C can start a non-hex number.
214 can_start_number(char c
);
216 // Return whether C can appear in a non-hex number.
218 can_continue_number(char c
)
219 { return Lex::can_start_number(c
); }
221 // If C1 C2 C3 form a valid three character operator, return the
222 // opcode. Otherwise return 0.
224 three_char_operator(char c1
, char c2
, char c3
);
226 // If C1 C2 form a valid two character operator, return the opcode.
227 // Otherwise return 0.
229 two_char_operator(char c1
, char c2
);
231 // If C1 is a valid one character operator, return the opcode.
232 // Otherwise return 0.
234 one_char_operator(char c1
);
236 // Read the next token.
238 get_token(const char**);
240 // Skip a C style /* */ comment. Return false if the comment did
243 skip_c_comment(const char**);
245 // Skip a line # comment. Return false if there was no newline.
247 skip_line_comment(const char**);
249 // Build a token CLASSIFICATION from all characters that match
250 // CAN_CONTINUE_FN. The token starts at START. Start matching from
251 // MATCH. Set *PP to the character following the token.
253 gather_token(Token::Classification
, bool (*can_continue_fn
)(char),
254 const char* start
, const char* match
, const char** pp
);
256 // Build a token from a quoted string.
258 gather_quoted_string(const char** pp
);
260 // The file we are reading.
261 Input_file
* input_file_
;
262 // The token sequence we create.
263 Token_sequence tokens_
;
264 // The current line number.
266 // The start of the current line in the buffer.
267 const char* linestart_
;
270 // Read the whole file into memory. We don't expect linker scripts to
271 // be large, so we just use a std::string as a buffer. We ignore the
272 // data we've already read, so that we read aligned buffers.
275 Lex::read_file(std::string
* contents
)
280 unsigned char buf
[BUFSIZ
];
283 this->input_file_
->file().read(off
, sizeof buf
, buf
, &got
);
284 contents
->append(reinterpret_cast<char*>(&buf
[0]), got
);
286 while (got
== sizeof buf
);
289 // Return whether C can be the start of a name, if the next character
290 // is C2. A name can being with a letter, underscore, period, or
291 // dollar sign. Because a name can be a file name, we also permit
292 // forward slash, backslash, and tilde. Tilde is the tricky case
293 // here; GNU ld also uses it as a bitwise not operator. It is only
294 // recognized as the operator if it is not immediately followed by
295 // some character which can appear in a symbol. That is, "~0" is a
296 // symbol name, and "~ 0" is an expression using bitwise not. We are
300 Lex::can_start_name(char c
, char c2
)
304 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
305 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
306 case 'M': case 'N': case 'O': case 'Q': case 'P': case 'R':
307 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
309 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
310 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
311 case 'm': case 'n': case 'o': case 'q': case 'p': case 'r':
312 case 's': case 't': case 'u': case 'v': case 'w': case 'x':
314 case '_': case '.': case '$': case '/': case '\\':
318 return can_continue_name(c2
);
325 // Return whether C can continue a name which has already started.
326 // Subsequent characters in a name are the same as the leading
327 // characters, plus digits and "=+-:[],?*". So in general the linker
328 // script language requires spaces around operators.
331 Lex::can_continue_name(char c
)
335 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
336 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
337 case 'M': case 'N': case 'O': case 'Q': case 'P': case 'R':
338 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
340 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
341 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
342 case 'm': case 'n': case 'o': case 'q': case 'p': case 'r':
343 case 's': case 't': case 'u': case 'v': case 'w': case 'x':
345 case '_': case '.': case '$': case '/': case '\\':
347 case '0': case '1': case '2': case '3': case '4':
348 case '5': case '6': case '7': case '8': case '9':
349 case '=': case '+': case '-': case ':': case '[': case ']':
350 case ',': case '?': case '*':
358 // For a number we accept 0x followed by hex digits, or any sequence
359 // of digits. The old linker accepts leading '$' for hex, and
360 // trailing HXBOD. Those are for MRI compatibility and we don't
361 // accept them. The old linker also accepts trailing MK for mega or
362 // kilo. Those are mentioned in the documentation, and we accept
365 // Return whether C1 C2 C3 can start a hex number.
368 Lex::can_start_hex(char c1
, char c2
, char c3
)
370 if (c1
== '0' && (c2
== 'x' || c2
== 'X'))
371 return Lex::can_continue_hex(c3
);
375 // Return whether C can appear in a hex number.
378 Lex::can_continue_hex(char c
)
382 case '0': case '1': case '2': case '3': case '4':
383 case '5': case '6': case '7': case '8': case '9':
384 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
385 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
393 // Return whether C can start a non-hex number.
396 Lex::can_start_number(char c
)
400 case '0': case '1': case '2': case '3': case '4':
401 case '5': case '6': case '7': case '8': case '9':
409 // If C1 C2 C3 form a valid three character operator, return the
410 // opcode (defined in the yyscript.h file generated from yyscript.y).
411 // Otherwise return 0.
414 Lex::three_char_operator(char c1
, char c2
, char c3
)
419 if (c2
== '<' && c3
== '=')
423 if (c2
== '>' && c3
== '=')
432 // If C1 C2 form a valid two character operator, return the opcode
433 // (defined in the yyscript.h file generated from yyscript.y).
434 // Otherwise return 0.
437 Lex::two_char_operator(char c1
, char c2
)
495 // If C1 is a valid operator, return the opcode. Otherwise return 0.
498 Lex::one_char_operator(char c1
)
531 // Skip a C style comment. *PP points to just after the "/*". Return
532 // false if the comment did not end.
535 Lex::skip_c_comment(const char** pp
)
538 while (p
[0] != '*' || p
[1] != '/')
549 this->linestart_
= p
+ 1;
558 // Skip a line # comment. Return false if there was no newline.
561 Lex::skip_line_comment(const char** pp
)
564 size_t skip
= strcspn(p
, "\n");
573 this->linestart_
= p
;
579 // Build a token CLASSIFICATION from all characters that match
580 // CAN_CONTINUE_FN. Update *PP.
583 Lex::gather_token(Token::Classification classification
,
584 bool (*can_continue_fn
)(char),
589 while ((*can_continue_fn
)(*match
))
592 return this->make_token(classification
,
593 std::string(start
, match
- start
),
597 // Build a token from a quoted string.
600 Lex::gather_quoted_string(const char** pp
)
602 const char* start
= *pp
;
603 const char* p
= start
;
605 size_t skip
= strcspn(p
, "\"\n");
607 return this->make_invalid_token(start
);
609 return this->make_token(Token::TOKEN_STRING
,
610 std::string(p
, skip
),
614 // Return the next token at *PP. Update *PP. General guideline: we
615 // require linker scripts to be simple ASCII. No unicode linker
616 // scripts. In particular we can assume that any '\0' is the end of
620 Lex::get_token(const char** pp
)
629 return this->make_eof_token(p
);
632 // Skip whitespace quickly.
633 while (*p
== ' ' || *p
== '\t')
640 this->linestart_
= p
;
644 // Skip C style comments.
645 if (p
[0] == '/' && p
[1] == '*')
647 int lineno
= this->lineno_
;
648 int charpos
= p
- this->linestart_
+ 1;
651 if (!this->skip_c_comment(pp
))
652 return Token(Token::TOKEN_INVALID
, lineno
, charpos
);
658 // Skip line comments.
662 if (!this->skip_line_comment(pp
))
663 return this->make_eof_token(p
);
669 if (Lex::can_start_name(p
[0], p
[1]))
670 return this->gather_token(Token::TOKEN_STRING
,
671 Lex::can_continue_name
,
674 // We accept any arbitrary name in double quotes, as long as it
675 // does not cross a line boundary.
679 return this->gather_quoted_string(pp
);
682 // Check for a number.
684 if (Lex::can_start_hex(p
[0], p
[1], p
[2]))
685 return this->gather_token(Token::TOKEN_INTEGER
,
686 Lex::can_continue_hex
,
689 if (Lex::can_start_number(p
[0]))
690 return this->gather_token(Token::TOKEN_INTEGER
,
691 Lex::can_continue_number
,
694 // Check for operators.
696 int opcode
= Lex::three_char_operator(p
[0], p
[1], p
[2]);
700 return this->make_token(opcode
, p
);
703 opcode
= Lex::two_char_operator(p
[0], p
[1]);
707 return this->make_token(opcode
, p
);
710 opcode
= Lex::one_char_operator(p
[0]);
714 return this->make_token(opcode
, p
);
717 return this->make_token(Token::TOKEN_INVALID
, p
);
721 // Tokenize the file. Return the final token.
726 std::string contents
;
727 this->read_file(&contents
);
729 const char* p
= contents
.c_str();
732 this->linestart_
= p
;
736 Token
t(this->get_token(&p
));
738 // Don't let an early null byte fool us into thinking that we've
739 // reached the end of the file.
741 && static_cast<size_t>(p
- contents
.c_str()) < contents
.length())
742 t
= this->make_invalid_token(p
);
744 if (t
.is_invalid() || t
.is_eof())
747 this->tokens_
.push_back(t
);
751 // A trivial task which waits for THIS_BLOCKER to be clear and then
752 // clears NEXT_BLOCKER. THIS_BLOCKER may be NULL.
754 class Script_unblock
: public Task
757 Script_unblock(Task_token
* this_blocker
, Task_token
* next_blocker
)
758 : this_blocker_(this_blocker
), next_blocker_(next_blocker
)
763 if (this->this_blocker_
!= NULL
)
764 delete this->this_blocker_
;
768 is_runnable(Workqueue
*)
770 if (this->this_blocker_
!= NULL
&& this->this_blocker_
->is_blocked())
776 locks(Workqueue
* workqueue
)
778 return new Task_locker_block(*this->next_blocker_
, workqueue
);
786 Task_token
* this_blocker_
;
787 Task_token
* next_blocker_
;
790 // This class holds data passed through the parser to the lexer and to
791 // the parser support functions. This avoids global variables. We
792 // can't use global variables because we need not be called in the
798 Parser_closure(const char* filename
,
799 const Position_dependent_options
& posdep_options
,
801 const Lex::Token_sequence
* tokens
)
802 : filename_(filename
), posdep_options_(posdep_options
),
803 in_group_(in_group
), tokens_(tokens
),
804 next_token_index_(0), inputs_(NULL
)
807 // Return the file name.
810 { return this->filename_
; }
812 // Return the position dependent options. The caller may modify
814 Position_dependent_options
&
815 position_dependent_options()
816 { return this->posdep_options_
; }
818 // Return whether this script is being run in a group.
821 { return this->in_group_
; }
823 // Whether we are at the end of the token list.
826 { return this->next_token_index_
>= this->tokens_
->size(); }
828 // Return the next token.
832 const Token
* ret
= &(*this->tokens_
)[this->next_token_index_
];
833 ++this->next_token_index_
;
837 // Return the list of input files, creating it if necessary. This
838 // is a space leak--we never free the INPUTS_ pointer.
842 if (this->inputs_
== NULL
)
843 this->inputs_
= new Input_arguments();
844 return this->inputs_
;
847 // Return whether we saw any input files.
850 { return this->inputs_
!= NULL
&& !this->inputs_
->empty(); }
853 // The name of the file we are reading.
854 const char* filename_
;
855 // The position dependent options.
856 Position_dependent_options posdep_options_
;
857 // Whether we are currently in a --start-group/--end-group.
860 // The tokens to be returned by the lexer.
861 const Lex::Token_sequence
* tokens_
;
862 // The index of the next token to return.
863 unsigned int next_token_index_
;
864 // New input files found to add to the link.
865 Input_arguments
* inputs_
;
868 // FILE was found as an argument on the command line. Try to read it
869 // as a script. We've already read BYTES of data into P, but we
870 // ignore that. Return true if the file was handled.
873 read_input_script(Workqueue
* workqueue
, const General_options
& options
,
874 Symbol_table
* symtab
, Layout
* layout
,
875 const Dirsearch
& dirsearch
, Input_objects
* input_objects
,
876 Input_group
* input_group
,
877 const Input_argument
* input_argument
,
878 Input_file
* input_file
, const unsigned char*, off_t
,
879 Task_token
* this_blocker
, Task_token
* next_blocker
)
882 if (lex
.tokenize().is_invalid())
885 Parser_closure
closure(input_file
->filename().c_str(),
886 input_argument
->file().options(),
890 if (yyparse(&closure
) != 0)
893 // THIS_BLOCKER must be clear before we may add anything to the
894 // symbol table. We are responsible for unblocking NEXT_BLOCKER
895 // when we are done. We are responsible for deleting THIS_BLOCKER
896 // when it is unblocked.
898 if (!closure
.saw_inputs())
900 // The script did not add any files to read. Note that we are
901 // not permitted to call NEXT_BLOCKER->unblock() here even if
902 // THIS_BLOCKER is NULL, as we are not in the main thread.
903 workqueue
->queue(new Script_unblock(this_blocker
, next_blocker
));
907 for (Input_arguments::const_iterator p
= closure
.inputs()->begin();
908 p
!= closure
.inputs()->end();
912 if (p
+ 1 == closure
.inputs()->end())
916 nb
= new Task_token();
919 workqueue
->queue(new Read_symbols(options
, input_objects
, symtab
,
920 layout
, dirsearch
, &*p
,
921 input_group
, this_blocker
, nb
));
928 // Manage mapping from keywords to the codes expected by the bison
931 class Keyword_to_parsecode
934 // The structure which maps keywords to parsecodes.
935 struct Keyword_parsecode
939 // Corresponding parsecode.
943 // Return the parsecode corresponding KEYWORD, or 0 if it is not a
946 keyword_to_parsecode(const char* keyword
);
949 // The array of all keywords.
950 static const Keyword_parsecode keyword_parsecodes_
[];
952 // The number of keywords.
953 static const int keyword_count
;
956 // Mapping from keyword string to keyword parsecode. This array must
957 // be kept in sorted order. Parsecodes are looked up using bsearch.
958 // This array must correspond to the list of parsecodes in yyscript.y.
960 const Keyword_to_parsecode::Keyword_parsecode
961 Keyword_to_parsecode::keyword_parsecodes_
[] =
963 { "ABSOLUTE", ABSOLUTE
},
965 { "ALIGN", ALIGN_K
},
966 { "ASSERT", ASSERT_K
},
967 { "AS_NEEDED", AS_NEEDED
},
972 { "CONSTANT", CONSTANT
},
973 { "CONSTRUCTORS", CONSTRUCTORS
},
975 { "CREATE_OBJECT_SYMBOLS", CREATE_OBJECT_SYMBOLS
},
976 { "DATA_SEGMENT_ALIGN", DATA_SEGMENT_ALIGN
},
977 { "DATA_SEGMENT_END", DATA_SEGMENT_END
},
978 { "DATA_SEGMENT_RELRO_END", DATA_SEGMENT_RELRO_END
},
979 { "DEFINED", DEFINED
},
982 { "EXCLUDE_FILE", EXCLUDE_FILE
},
983 { "EXTERN", EXTERN
},
986 { "FORCE_COMMON_ALLOCATION", FORCE_COMMON_ALLOCATION
},
989 { "INCLUDE", INCLUDE
},
991 { "INHIBIT_COMMON_ALLOCATION", INHIBIT_COMMON_ALLOCATION
},
994 { "LENGTH", LENGTH
},
995 { "LOADADDR", LOADADDR
},
999 { "MEMORY", MEMORY
},
1002 { "NOCROSSREFS", NOCROSSREFS
},
1003 { "NOFLOAT", NOFLOAT
},
1004 { "NOLOAD", NOLOAD
},
1005 { "ONLY_IF_RO", ONLY_IF_RO
},
1006 { "ONLY_IF_RW", ONLY_IF_RW
},
1007 { "ORIGIN", ORIGIN
},
1008 { "OUTPUT", OUTPUT
},
1009 { "OUTPUT_ARCH", OUTPUT_ARCH
},
1010 { "OUTPUT_FORMAT", OUTPUT_FORMAT
},
1011 { "OVERLAY", OVERLAY
},
1013 { "PROVIDE", PROVIDE
},
1014 { "PROVIDE_HIDDEN", PROVIDE_HIDDEN
},
1016 { "SEARCH_DIR", SEARCH_DIR
},
1017 { "SECTIONS", SECTIONS
},
1018 { "SEGMENT_START", SEGMENT_START
},
1020 { "SIZEOF", SIZEOF
},
1021 { "SIZEOF_HEADERS", SIZEOF_HEADERS
},
1022 { "SORT_BY_ALIGNMENT", SORT_BY_ALIGNMENT
},
1023 { "SORT_BY_NAME", SORT_BY_NAME
},
1024 { "SPECIAL", SPECIAL
},
1026 { "STARTUP", STARTUP
},
1027 { "SUBALIGN", SUBALIGN
},
1028 { "SYSLIB", SYSLIB
},
1029 { "TARGET", TARGET_K
},
1030 { "TRUNCATE", TRUNCATE
},
1031 { "VERSION", VERSIONK
},
1032 { "global", GLOBAL
},
1038 { "sizeof_headers", SIZEOF_HEADERS
},
1041 const int Keyword_to_parsecode::keyword_count
=
1042 (sizeof(Keyword_to_parsecode::keyword_parsecodes_
)
1043 / sizeof(Keyword_to_parsecode::keyword_parsecodes_
[0]));
1045 // Comparison function passed to bsearch.
1051 ktt_compare(const void* keyv
, const void* kttv
)
1053 const char* key
= static_cast<const char*>(keyv
);
1054 const Keyword_to_parsecode::Keyword_parsecode
* ktt
=
1055 static_cast<const Keyword_to_parsecode::Keyword_parsecode
*>(kttv
);
1056 return strcmp(key
, ktt
->keyword
);
1059 } // End extern "C".
1062 Keyword_to_parsecode::keyword_to_parsecode(const char* keyword
)
1064 void* kttv
= bsearch(keyword
,
1065 Keyword_to_parsecode::keyword_parsecodes_
,
1066 Keyword_to_parsecode::keyword_count
,
1067 sizeof(Keyword_to_parsecode::keyword_parsecodes_
[0]),
1071 Keyword_parsecode
* ktt
= static_cast<Keyword_parsecode
*>(kttv
);
1072 return ktt
->parsecode
;
1075 } // End namespace gold.
1077 // The remaining functions are extern "C", so it's clearer to not put
1078 // them in namespace gold.
1080 using namespace gold
;
1082 // This function is called by the bison parser to return the next
1086 yylex(YYSTYPE
* lvalp
, void* closurev
)
1088 Parser_closure
* closure
= static_cast<Parser_closure
*>(closurev
);
1090 if (closure
->at_eof())
1093 const Token
* token
= closure
->next_token();
1095 switch (token
->classification())
1098 case Token::TOKEN_INVALID
:
1099 case Token::TOKEN_EOF
:
1102 case Token::TOKEN_STRING
:
1104 const char* str
= token
->string_value().c_str();
1105 int parsecode
= Keyword_to_parsecode::keyword_to_parsecode(str
);
1108 lvalp
->string
= str
;
1112 case Token::TOKEN_OPERATOR
:
1113 return token
->operator_value();
1115 case Token::TOKEN_INTEGER
:
1116 lvalp
->integer
= token
->integer_value();
1121 // This function is called by the bison parser to report an error.
1124 yyerror(void* closurev
, const char* message
)
1126 Parser_closure
* closure
= static_cast<Parser_closure
*>(closurev
);
1128 fprintf(stderr
, _("%s: %s: %s\n"),
1129 program_name
, closure
->filename(), message
);
1133 // Called by the bison parser to add a file to the link.
1136 script_add_file(void* closurev
, const char* name
)
1138 Parser_closure
* closure
= static_cast<Parser_closure
*>(closurev
);
1139 Input_file_argument
file(name
, false, closure
->position_dependent_options());
1140 closure
->inputs()->add_file(file
);
1143 // Called by the bison parser to start a group. If we are already in
1144 // a group, that means that this script was invoked within a
1145 // --start-group --end-group sequence on the command line, or that
1146 // this script was found in a GROUP of another script. In that case,
1147 // we simply continue the existing group, rather than starting a new
1148 // one. It is possible to construct a case in which this will do
1149 // something other than what would happen if we did a recursive group,
1150 // but it's hard to imagine why the different behaviour would be
1151 // useful for a real program. Avoiding recursive groups is simpler
1152 // and more efficient.
1155 script_start_group(void* closurev
)
1157 Parser_closure
* closure
= static_cast<Parser_closure
*>(closurev
);
1158 if (!closure
->in_group())
1159 closure
->inputs()->start_group();
1162 // Called by the bison parser at the end of a group.
1165 script_end_group(void* closurev
)
1167 Parser_closure
* closure
= static_cast<Parser_closure
*>(closurev
);
1168 if (!closure
->in_group())
1169 closure
->inputs()->end_group();
1172 // Called by the bison parser to start an AS_NEEDED list.
1175 script_start_as_needed(void* closurev
)
1177 Parser_closure
* closure
= static_cast<Parser_closure
*>(closurev
);
1178 closure
->position_dependent_options().set_as_needed();
1181 // Called by the bison parser at the end of an AS_NEEDED list.
1184 script_end_as_needed(void* closurev
)
1186 Parser_closure
* closure
= static_cast<Parser_closure
*>(closurev
);
1187 closure
->position_dependent_options().clear_as_needed();