use nom::IResult;
use std::cell::RefCell;
use std::rc::Rc;
use zero_parser::parser::{ParserContext, ParserResult};

mod nom_parsers;
pub mod zero_parsers;
/// The category of a lexical token.
#[derive(Debug, Copy, Clone)]
pub enum LexicalTokenType {
    Identifier,
    /// Integer-constant type.
    /// The literal value of the constant is already computed during lexing.
    ConstInteger(u32),
    /// Floating-point-constant type.
    ConstFloat(f32),
    Keyword,
    Delimiter,
    Operator,
    LiteralString,
}
/// A lexical token backed by a `&str` slice of the source text.
#[derive(PartialEq, Debug, Copy, Clone)]
pub struct LexicalToken<'a> {
    /// The category of this token.
    pub token_type: LexicalTokenType,
    /// The slice of source text this token was lexed from.
    pub literal_value: &'a str,
}
/// A lexical token backed by a `&[char]` slice; produced by the
/// zero_parser-based lexer (`zero_lexical_parser`).
#[derive(PartialEq, Debug, Copy, Clone)]
pub struct NewLexicalToken<'a> {
    /// The category of this token.
    pub token_type: LexicalTokenType,
    /// The slice of source characters this token was lexed from.
    pub literal_value: &'a [char],
}
/// 为词法令牌类型实现相等性判断
 | 
						|
/// 重写判断的原因是我们希望不同值类型的整型常量和浮点常量可以是相同的
 | 
						|
///
 | 
						|
/// 即满足如下的判断:
 | 
						|
/// ```
 | 
						|
/// use rustic_sysy::tokenizer::LexicalTokenType;
 | 
						|
///
 | 
						|
/// assert_eq!(LexicalTokenType::ConstInteger(0), LexicalTokenType::ConstInteger(2));
 | 
						|
/// assert_eq!(LexicalTokenType::ConstFloat(0f32), LexicalTokenType::ConstFloat(2f32));
 | 
						|
/// ```
 | 
						|
impl PartialEq for LexicalTokenType {
 | 
						|
    fn eq(&self, other: &Self) -> bool {
 | 
						|
        match self {
 | 
						|
            LexicalTokenType::ConstInteger(_) => match other {
 | 
						|
                LexicalTokenType::ConstInteger(_) => true,
 | 
						|
                _ => false,
 | 
						|
            },
 | 
						|
            LexicalTokenType::ConstFloat(_) => match other {
 | 
						|
                LexicalTokenType::ConstFloat(_) => true,
 | 
						|
                _ => false,
 | 
						|
            },
 | 
						|
            LexicalTokenType::LiteralString => match other {
 | 
						|
                LexicalTokenType::LiteralString => true,
 | 
						|
                _ => false,
 | 
						|
            },
 | 
						|
            LexicalTokenType::Keyword => match other {
 | 
						|
                LexicalTokenType::Keyword => true,
 | 
						|
                _ => false,
 | 
						|
            },
 | 
						|
            LexicalTokenType::Identifier => match other {
 | 
						|
                LexicalTokenType::Identifier => true,
 | 
						|
                _ => false,
 | 
						|
            },
 | 
						|
            LexicalTokenType::Delimiter => match other {
 | 
						|
                LexicalTokenType::Delimiter => true,
 | 
						|
                _ => false,
 | 
						|
            },
 | 
						|
            LexicalTokenType::Operator => match other {
 | 
						|
                LexicalTokenType::Operator => true,
 | 
						|
                _ => false,
 | 
						|
            },
 | 
						|
        }
 | 
						|
    }
 | 
						|
}
 | 
						|
 | 
						|
impl<'a, 'b> PartialEq<NewLexicalToken<'a>> for LexicalToken<'b> {
 | 
						|
    fn eq(&self, other: &NewLexicalToken) -> bool {
 | 
						|
        self.token_type == other.token_type
 | 
						|
            && self.literal_value.chars().collect::<String>()
 | 
						|
                == other.literal_value.iter().collect::<String>()
 | 
						|
    }
 | 
						|
}
 | 
						|
 | 
						|
pub fn nom_lexical_parser(mut input: &str) -> IResult<&str, Vec<LexicalToken>> {
 | 
						|
    let mut array = vec![];
 | 
						|
 | 
						|
    while !input.is_empty() {
 | 
						|
        if let Ok((i, _)) = nom_parsers::junk_parser(input) {
 | 
						|
            if i.is_empty() {
 | 
						|
                break;
 | 
						|
            }
 | 
						|
 | 
						|
            input = i;
 | 
						|
            continue;
 | 
						|
        }
 | 
						|
 | 
						|
        let (i, token) = nom_parsers::combine_parser(input)?;
 | 
						|
        input = i;
 | 
						|
        array.push(token);
 | 
						|
    }
 | 
						|
 | 
						|
    Ok((input, array))
 | 
						|
}
 | 
						|
 | 
						|
pub fn zero_lexical_parser(
 | 
						|
    context: Rc<RefCell<ParserContext<()>>>,
 | 
						|
    mut input: &[char],
 | 
						|
) -> ParserResult<char, Vec<NewLexicalToken>> {
 | 
						|
    let mut array = vec![];
 | 
						|
 | 
						|
    while !input.is_empty() {
 | 
						|
        if let Ok((i, _)) = zero_parsers::junk_parser(context.clone(), input) {
 | 
						|
            if i.is_empty() {
 | 
						|
                break;
 | 
						|
            }
 | 
						|
 | 
						|
            input = i;
 | 
						|
            continue;
 | 
						|
        }
 | 
						|
 | 
						|
        let (i, token) = zero_parsers::combine_parser(context.clone(), input)?;
 | 
						|
        input = i;
 | 
						|
        array.push(token);
 | 
						|
    }
 | 
						|
 | 
						|
    Ok((input, array))
 | 
						|
}
 |