Reduced token size

Eduard Urbach 2024-07-20 23:33:07 +02:00
parent 43cdac5572
commit ca36d34cb9
Signed by: akyoto
GPG Key ID: C874F672B1AF20C0
4 changed files with 21 additions and 17 deletions
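
The point of the commit: narrowing Token's Position field from int to a uint32 alias shrinks every token. A minimal sketch of the effect, assuming 64-bit Go and a one-byte Kind (the real Kind type is not shown in this diff):

package main

import (
	"fmt"
	"unsafe"
)

// Hypothetical stand-ins for the Token layout before and after the commit.
type tokenBefore struct {
	Bytes    []byte // 24 bytes on 64-bit: pointer, len, cap
	Position int    // 8 bytes
	Kind     uint8  // assumed type; 1 byte plus padding
}

type tokenAfter struct {
	Bytes    []byte
	Position uint32 // 4 bytes
	Kind     uint8
}

func main() {
	fmt.Println(unsafe.Sizeof(tokenBefore{})) // typically 40 on 64-bit
	fmt.Println(unsafe.Sizeof(tokenAfter{}))  // typically 32 on 64-bit
}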

View File

@@ -14,13 +14,13 @@ type Error struct {
 	Err   error
 	File  *fs.File
 	Stack string
-	Position int
+	Position token.Position
 }
 
 // New generates an error message at the current token position.
 // The error message is clickable in popular editors and leads you
 // directly to the faulty file at the given line and position.
-func New(err error, file *fs.File, position int) *Error {
+func New(err error, file *fs.File, position token.Position) *Error {
 	return &Error{
 		Err:  err,
 		File: file,
@@ -48,12 +48,12 @@ func (e *Error) Error() string {
 	for _, t := range e.File.Tokens {
 		if t.Position >= e.Position {
-			column = e.Position - lineStart
+			column = int(e.Position) - lineStart
 			break
 		}
 
 		if t.Kind == token.NewLine {
-			lineStart = t.Position
+			lineStart = int(t.Position)
 			line++
 		}
 	}
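
The loop above turns a flat token offset into a line and column; the int(...) conversions are now needed because Position is unsigned. How Error() finally renders those values is outside this diff; a plausible sketch of the clickable format, with render being a hypothetical helper:

// Hypothetical; the actual format string in Error() is not shown here.
func render(path string, line int, column int, err error) string {
	// "path:line:column: message" is the convention popular editors
	// turn into a clickable link.
	return fmt.Sprintf("%s:%d:%d: %v", path, line, column, err)
}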

View File

@@ -0,0 +1,4 @@
+package token
+
+// Position is the data type for storing file offsets.
+type Position = uint32
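
Note that this is an alias declaration, not a new named type: Position and uint32 are interchangeable everywhere, while mixing with int (such as the result of len) still requires an explicit conversion, which is exactly what drives the tokenizer changes below. A small illustration, assuming it sits inside the token package:

func aliasDemo() {
	var p Position = 7
	var u uint32 = p // fine: an alias is the same type, no conversion needed

	n := len([]byte("abc")) // len returns int
	p = Position(n)         // so mixing with int still needs a conversion
	_, _ = p, u
}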

View File

@@ -10,13 +10,13 @@ import (
 // This makes parsing easier and allows us to do better syntax checks.
 type Token struct {
 	Bytes    []byte
-	Position int
+	Position Position
 	Kind     Kind
 }
 
 // End returns the position after the token.
-func (t *Token) End() int {
-	return t.Position + len(t.Bytes)
+func (t *Token) End() Position {
+	return t.Position + Position(len(t.Bytes))
 }
 
 // String creates a human readable representation for debugging purposes.
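
With End() returning a Position, a token's source text can still be sliced straight out of the buffer, since Go slice indices accept any integer type. A hypothetical example test inside the token package, assuming Identifier is a defined Kind constant:

func ExampleToken_End() {
	buffer := []byte("hello world")
	tok := Token{Bytes: buffer[0:5], Position: 0, Kind: Identifier}

	// End() = Position + len(Bytes); uint32 indices are valid slice bounds.
	fmt.Println(string(buffer[tok.Position:tok.End()]))
	// Output: hello
}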

View File

@@ -18,11 +18,11 @@ var (
 // Tokenize turns the file contents into a list of tokens.
 func Tokenize(buffer []byte) List {
 	var (
-		i      int
+		i      Position
 		tokens = make(List, 0, 8+len(buffer)/2)
 	)
 
-	for i < len(buffer) {
+	for i < Position(len(buffer)) {
 		switch buffer[i] {
 		case ' ', '\t':
 		case ',':
@@ -42,11 +42,11 @@ func Tokenize(buffer []byte) List {
 		case '\n':
 			tokens = append(tokens, Token{Kind: NewLine, Position: i, Bytes: newLineBytes})
 		case '/':
-			if i+1 >= len(buffer) || buffer[i+1] != '/' {
+			if i+1 >= Position(len(buffer)) || buffer[i+1] != '/' {
 				position := i
 				i++
 
-				for i < len(buffer) && isOperator(buffer[i]) {
+				for i < Position(len(buffer)) && isOperator(buffer[i]) {
 					i++
 				}
@@ -54,7 +54,7 @@ func Tokenize(buffer []byte) List {
 			} else {
 				position := i
 
-				for i < len(buffer) && buffer[i] != '\n' {
+				for i < Position(len(buffer)) && buffer[i] != '\n' {
 					i++
 				}
@@ -65,10 +65,10 @@ func Tokenize(buffer []byte) List {
 		case '"':
 			start := i
-			end := len(buffer)
+			end := Position(len(buffer))
 			i++
 
-			for i < len(buffer) {
+			for i < Position(len(buffer)) {
 				if buffer[i] == '"' {
 					end = i + 1
 					i++
@@ -86,7 +86,7 @@ func Tokenize(buffer []byte) List {
 			position := i
 			i++
 
-			for i < len(buffer) && isIdentifier(buffer[i]) {
+			for i < Position(len(buffer)) && isIdentifier(buffer[i]) {
 				i++
 			}
@@ -106,7 +106,7 @@ func Tokenize(buffer []byte) List {
 			position := i
 			i++
 
-			for i < len(buffer) && isNumber(buffer[i]) {
+			for i < Position(len(buffer)) && isNumber(buffer[i]) {
 				i++
 			}
@@ -118,7 +118,7 @@ func Tokenize(buffer []byte) List {
 			position := i
 			i++
 
-			for i < len(buffer) && isOperator(buffer[i]) {
+			for i < Position(len(buffer)) && isOperator(buffer[i]) {
 				i++
 			}
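
Call sites are untouched by the switch to Position, since Tokenize still takes a plain []byte. A hypothetical invocation, assuming List is a slice of Token (as the make and append calls above suggest):

tokens := Tokenize([]byte("x := 42\n"))
for _, t := range tokens {
	// Each token now carries a uint32 offset instead of an int.
	fmt.Println(t.Position, t.Kind, string(t.Bytes))
}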