now copying the language correctly, at least I hope
This commit is contained in:
parent 2d70208978
commit 5f34dbfeca
5 changed files with 44 additions and 22 deletions
@@ -324,7 +324,8 @@ uint32_t ARC_Lexer_AutomataMatchCharOrBetweenFn(ARC_String **tokenData, ARC_Stri
    char *automataDataChars = (char *)automataData;
    if(string->data[0] >= automataDataChars[0] && string->data[0] <= automataDataChars[1]){
        //the first character falls in the automaton's inclusive range, so
        //return it as token data; the match always has length 1
        ARC_String_Create(tokenData, string->data, 1);
        //TODO: fix this
        //ARC_String_Create(tokenData, string->data, 1);
        return 1;
    }
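
The matcher treats automataData as two packed chars giving an inclusive [low, high] range for a single character. A minimal standalone sketch of the same idea under that assumption (matchCharBetween and its signature are illustrative, not ARC's API):

#include <stdint.h>
#include <stdio.h>

//returns 1 and writes out the matched character when input[0] lies in the
//inclusive range [range[0], range[1]] packed into the automaton data
static uint32_t matchCharBetween(const char *input, const void *automataData, char *matched){
    const char *range = (const char *)automataData;
    if(input[0] >= range[0] && input[0] <= range[1]){
        *matched = input[0];
        return 1; //a char-or-between automaton always consumes exactly one character
    }
    return 0;
}

int main(void){
    char matched;
    char range[2] = { 'a', 'z' };
    printf("%u\n", (unsigned)matchCharBetween("hello", range, &matched)); //prints 1
    printf("%u\n", (unsigned)matchCharBetween("HELLO", range, &matched)); //prints 0
    return 0;
}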

@@ -25,13 +25,25 @@ void ARC_Parser_Create(ARC_Parser **parser, ARC_Array *language, ARC_Parser_Init
    (*parser)->language.size = language->size;
    (*parser)->language.data = malloc(sizeof(ARC_ParserLanguageTag) * language->size);

    //bulk-copy the tag structs, then replace each aliased tokensOrTags pointer below
    memcpy((*parser)->language.data, language->data, sizeof(ARC_ParserLanguageTag) * language->size);
    for(uint32_t index = 0; index < language->size; index++){
        ARC_ParserLanguageTag *languageTag = ((ARC_ParserLanguageTag *)language->data) + index;
        ARC_ParserLanguageTag *currentTag = ((ARC_ParserLanguageTag *)(*parser)->language.data) + index;

        //copy the language tag's scalar fields into the current tag
        currentTag->tagId = languageTag->tagId;
        currentTag->tokensOrTagsSize = languageTag->tokensOrTagsSize;

        //create a place to store the tokens or tags
        currentTag->tokensOrTags = (uint32_t **)malloc(sizeof(uint32_t *) * languageTag->tokensOrTagsSize);

        //copy each or section into the tokensOrTags
        for(uint32_t orIndex = 0; orIndex < languageTag->tokensOrTagsSize; orIndex++){
            currentTag->tokensOrTags[orIndex] = (uint32_t *)malloc(sizeof(uint32_t) * (languageTag->tokensOrTags[orIndex][0] + 1));

            for(uint32_t tokenOrTagIndex = 0; tokenOrTagIndex < languageTag->tokensOrTags[orIndex][0] + 1; tokenOrTagIndex++){
                currentTag->tokensOrTags[orIndex][tokenOrTagIndex] = languageTag->tokensOrTags[orIndex][tokenOrTagIndex];
            }
        }
    }
}
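
Each tokensOrTags row is length-prefixed: row[0] stores how many token or tag ids follow, so a row occupies row[0] + 1 uint32_t values. A self-contained sketch of the same deep-copy pattern under that assumption (copyRows is a hypothetical helper, not ARC's API):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

//deep-copies a jagged array of length-prefixed rows: src[i][0] holds the
//count of ids that follow it, so each row stores src[i][0] + 1 values total
static uint32_t **copyRows(uint32_t **src, uint32_t rowCount){
    uint32_t **dst = (uint32_t **)malloc(sizeof(uint32_t *) * rowCount);
    for(uint32_t i = 0; i < rowCount; i++){
        uint32_t valueCount = src[i][0] + 1;
        dst[i] = (uint32_t *)malloc(sizeof(uint32_t) * valueCount);
        memcpy(dst[i], src[i], sizeof(uint32_t) * valueCount);
    }
    return dst;
}

The per-row allocations are what make the parser own its language: the bulk memcpy only duplicates the row pointers, which would otherwise keep aliasing the caller's compound literals after they go out of scope.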

@@ -49,6 +61,12 @@ void ARC_Parser_Destroy(ARC_Parser *parser){
    //free all the copied tokens or tags from memory
    for(uint32_t index = 0; index < parser->language.size; index++){
        ARC_ParserLanguageTag *currentTag = ((ARC_ParserLanguageTag *)parser->language.data) + index;

        //free the orIndex values
        for(uint32_t orIndex = 0; orIndex < currentTag->tokensOrTagsSize; orIndex++){
            free(currentTag->tokensOrTags[orIndex]);
        }

        free(currentTag->tokensOrTags);
    }
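
The matching teardown frees each row before the array of row pointers; continuing the sketch above under the same assumptions:

//frees a jagged array produced by copyRows
static void freeRows(uint32_t **rows, uint32_t rowCount){
    for(uint32_t i = 0; i < rowCount; i++){
        free(rows[i]); //each length-prefixed row first
    }
    free(rows); //then the array of row pointers itself
}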

@@ -35,8 +35,8 @@ void ARC_ParserLang_InitLexerRulesFn(ARC_Lexer *lexer){
 }

 void ARC_Parser_CreateAsParserLang(ARC_Parser **parser){
     //<line> -> <body> NEWLINE <line> | <body> | NEWLINE <line> | LAMBDA
-    uint32_t *line[] = { (uint32_t[]){ 3, ARC_PARSERLANG_BODY, ARC_PARSERLANG_TOKEN_NEWLINE_ID, ARC_PARSERLANG_TOKEN_ARROW_ID }, (uint32_t[]){ 1, ARC_PARSERLANG_BODY }, (uint32_t[]){ 2, ARC_PARSERLANG_TOKEN_NEWLINE_ID, ARC_PARSERLANG_LINE }, (uint32_t[]){ 1, ARC_PARSERLANG_LAMBDA } };
+    uint32_t *line[] = { (uint32_t[]){ 3, ARC_PARSERLANG_BODY, ARC_PARSERLANG_TOKEN_NEWLINE_ID, ARC_PARSERLANG_LINE }, (uint32_t[]){ 1, ARC_PARSERLANG_BODY }, (uint32_t[]){ 2, ARC_PARSERLANG_TOKEN_NEWLINE_ID, ARC_PARSERLANG_LINE }, (uint32_t[]){ 1, ARC_PARSERLANG_LAMBDA } };

     //<body> -> <tag> WHITESPACE ARROW WHITESPACE <arguments>
     uint32_t *body[] = { (uint32_t[]){ 5, ARC_PARSERLANG_TAG, ARC_PARSERLANG_TOKEN_WHITESPACE, ARC_PARSERLANG_TOKEN_ARROW_ID, ARC_PARSERLANG_TOKEN_WHITESPACE, ARC_PARSERLANG_ARGUMENTS } };
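
Each alternative of a production is a compound-literal row whose first element counts the symbols that follow, which is exactly why the copy loops above read row[0] + 1 values per row. A tiny sketch of the encoding with made-up symbol ids (SYM_B and friends are illustrative, not ARC constants):

#include <stdint.h>
#include <stdio.h>

enum { SYM_B = 1, SYM_C, SYM_D }; //hypothetical symbol ids

int main(void){
    //<a> -> <b> <c> | <d>
    uint32_t *a[] = { (uint32_t[]){ 2, SYM_B, SYM_C }, (uint32_t[]){ 1, SYM_D } };

    for(unsigned i = 0; i < sizeof(a) / sizeof(a[0]); i++){
        printf("alternative %u has %u symbols\n", i, (unsigned)a[i][0]);
    }
    return 0;
}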