Reduced token size

Eduard Urbach 2024-07-20 23:33:07 +02:00
parent 43cdac5572
commit ca36d34cb9
Signed by: akyoto
GPG Key ID: C874F672B1AF20C0
4 changed files with 21 additions and 17 deletions

View File

@@ -14,13 +14,13 @@ type Error struct {
 	Err      error
 	File     *fs.File
 	Stack    string
-	Position int
+	Position token.Position
 }

 // New generates an error message at the current token position.
 // The error message is clickable in popular editors and leads you
 // directly to the faulty file at the given line and position.
-func New(err error, file *fs.File, position int) *Error {
+func New(err error, file *fs.File, position token.Position) *Error {
 	return &Error{
 		Err:  err,
 		File: file,
@@ -48,12 +48,12 @@ func (e *Error) Error() string {
 	for _, t := range e.File.Tokens {
 		if t.Position >= e.Position {
-			column = e.Position - lineStart
+			column = int(e.Position) - lineStart
 			break
 		}

 		if t.Kind == token.NewLine {
-			lineStart = t.Position
+			lineStart = int(t.Position)
 			line++
 		}
 	}
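A note on the int(...) conversions above: line, column, and lineStart stay plain int because they only feed human-readable output, so each Position is converted at the boundary. A self-contained sketch of the same idea (hypothetical helper, 1-based coordinates rather than this file's exact arithmetic):

package main

import "fmt"

// lineAndColumn is a hypothetical helper mirroring the scan above:
// newlines holds the uint32 offsets of '\n' bytes, offset is the
// error position, and the results are plain ints for display.
func lineAndColumn(newlines []uint32, offset uint32) (line, column int) {
	line = 1
	lineStart := 0

	for _, nl := range newlines {
		if nl >= offset {
			break
		}

		// The next line starts one byte past the newline.
		lineStart = int(nl) + 1
		line++
	}

	return line, int(offset) - lineStart + 1
}

func main() {
	// "ab\ncd": offset 3 is 'c', the first column of line 2.
	fmt.Println(lineAndColumn([]uint32{2}, 3)) // 2 1
}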

View File

@@ -0,0 +1,4 @@
+package token
+
+// Position is the data type for storing file offsets.
+type Position = uint32
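Note that the = makes this a type alias rather than a defined type: Position and uint32 are the same type under two names, so values flow between them with no conversion at all. A minimal sketch of that distinction:

package main

import "fmt"

type Position = uint32

func takesUint32(x uint32) uint32 { return x + 1 }

func main() {
	var p Position = 7

	// An alias is the same type under a new name, so Position values
	// pass into uint32 parameters without any conversion.
	fmt.Println(takesUint32(p)) // 8

	var u uint32 = p
	fmt.Println(u) // 7
}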

View File

@@ -10,13 +10,13 @@ import (
 // This makes parsing easier and allows us to do better syntax checks.
 type Token struct {
 	Bytes    []byte
-	Position int
+	Position Position
 	Kind     Kind
 }

 // End returns the position after the token.
-func (t *Token) End() int {
-	return t.Position + len(t.Bytes)
+func (t *Token) End() Position {
+	return t.Position + Position(len(t.Bytes))
 }

 // String creates a human readable representation for debugging purposes.
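The payoff behind the commit title: on 64-bit platforms int occupies 8 bytes and uint32 only 4, so every Token shrinks once alignment padding is accounted for. A rough sketch (the real Kind type is not shown in this diff; a one-byte enum is assumed here):

package main

import (
	"fmt"
	"unsafe"
)

type Kind uint8 // assumption: Kind's real definition is not part of this diff

type oldToken struct {
	Bytes    []byte // 24-byte slice header on 64-bit
	Position int    // 8 bytes
	Kind     Kind
}

type newToken struct {
	Bytes    []byte
	Position uint32 // 4 bytes
	Kind     Kind
}

func main() {
	// With alignment padding: 40 bytes before, 32 bytes after.
	fmt.Println(unsafe.Sizeof(oldToken{}), unsafe.Sizeof(newToken{}))
}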

View File

@@ -18,11 +18,11 @@ var (
 // Tokenize turns the file contents into a list of tokens.
 func Tokenize(buffer []byte) List {
 	var (
-		i      int
+		i      Position
 		tokens = make(List, 0, 8+len(buffer)/2)
 	)

-	for i < len(buffer) {
+	for i < Position(len(buffer)) {
 		switch buffer[i] {
 		case ' ', '\t':
 		case ',':
@@ -42,11 +42,11 @@ func Tokenize(buffer []byte) List {
 		case '\n':
 			tokens = append(tokens, Token{Kind: NewLine, Position: i, Bytes: newLineBytes})

 		case '/':
-			if i+1 >= len(buffer) || buffer[i+1] != '/' {
+			if i+1 >= Position(len(buffer)) || buffer[i+1] != '/' {
 				position := i
 				i++

-				for i < len(buffer) && isOperator(buffer[i]) {
+				for i < Position(len(buffer)) && isOperator(buffer[i]) {
 					i++
 				}
@@ -54,7 +54,7 @@ func Tokenize(buffer []byte) List {
 			} else {
 				position := i

-				for i < len(buffer) && buffer[i] != '\n' {
+				for i < Position(len(buffer)) && buffer[i] != '\n' {
 					i++
 				}
@@ -65,10 +65,10 @@ func Tokenize(buffer []byte) List {
 		case '"':
 			start := i
-			end := len(buffer)
+			end := Position(len(buffer))
 			i++

-			for i < len(buffer) {
+			for i < Position(len(buffer)) {
 				if buffer[i] == '"' {
 					end = i + 1
 					i++
@@ -86,7 +86,7 @@ func Tokenize(buffer []byte) List {
 			position := i
 			i++

-			for i < len(buffer) && isIdentifier(buffer[i]) {
+			for i < Position(len(buffer)) && isIdentifier(buffer[i]) {
 				i++
 			}
@@ -106,7 +106,7 @@ func Tokenize(buffer []byte) List {
 			position := i
 			i++

-			for i < len(buffer) && isNumber(buffer[i]) {
+			for i < Position(len(buffer)) && isNumber(buffer[i]) {
 				i++
 			}
@@ -118,7 +118,7 @@ func Tokenize(buffer []byte) List {
 			position := i
 			i++

-			for i < len(buffer) && isOperator(buffer[i]) {
+			for i < Position(len(buffer)) && isOperator(buffer[i]) {
 				i++
 			}
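Since Go performs no implicit numeric conversions, switching the loop index to Position forces the repeated Position(len(buffer)) pattern seen above; the old i < len(buffer) would no longer compile. It also quietly assumes source files stay below 4 GiB, the uint32 range. A minimal standalone sketch:

package main

import "fmt"

type Position = uint32

func main() {
	buffer := []byte("abc // trailing comment")

	var i Position

	// `i < len(buffer)` is a compile error (Position vs. int),
	// hence the explicit conversion on every bounds check.
	for i < Position(len(buffer)) && buffer[i] != '/' {
		i++
	}

	fmt.Println(i) // 4: offset of the first '/'
}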