; mirror of https://git.missingno.dev/kolibrios-nvme-driver/
; drivers/nvme/nvme.asm
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ;;
;; Copyright (C) KolibriOS team 2004-2024. All rights reserved. ;;
;; Distributed under terms of the GNU General Public License ;;
;; ;;
;; GNU GENERAL PUBLIC LICENSE ;;
;; Version 2, June 1991 ;;
;; ;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
2024-03-28 23:43:18 +01:00
format PE DLL native
entry START
API_VERSION = 0 ;debug
SRV_GETVERSION = 0
2024-03-30 04:42:01 +01:00
__DEBUG__ = 1
__DEBUG_LEVEL__ = 1
DRIVER_VERSION = 1
DBG_INFO = 1
NULLPTR = 0
FALSE = 0
TRUE = 1
2024-03-30 04:42:01 +01:00
2024-03-28 23:43:18 +01:00
section ".flat" code readable writable executable
include "../proc32.inc"
include "../struct.inc"
include "../macros.inc"
2024-03-30 04:42:01 +01:00
include "../fdo.inc"
2024-03-28 23:43:18 +01:00
include "../pci.inc"
2024-03-30 04:42:01 +01:00
include "../peimport.inc"
2024-03-31 21:43:38 +02:00
include "nvme.inc"
2024-04-21 02:29:06 +02:00
include "macros.inc"
include "lib.inc"
include "command.inc"
2024-03-28 23:43:18 +01:00
2024-06-26 00:37:04 +02:00
struct DISKMEDIAINFO
2024-07-05 23:30:22 +02:00
flags dd ?
2024-07-05 23:27:38 +02:00
sectorsize dd ?
capacity dq ?
2024-06-26 00:37:04 +02:00
ends
2024-03-28 23:43:18 +01:00
proc START c, reason:dword
local AnythingLoadedSuccessfully db 0
2024-07-05 23:27:38 +02:00
cmp [reason], DRV_ENTRY
jne .err
2024-03-28 23:43:18 +01:00
.entry:
DEBUGF DBG_INFO, "Detecting NVMe device...\n"
2024-07-05 23:27:38 +02:00
call detect_nvme
test eax, eax
jz .err
xor ebx, ebx
mov esi, dword [p_nvme_devices]
test esi, esi
jz .err
2024-07-30 20:38:53 +02:00
sub esi, sizeof.pcidev
2024-04-28 19:50:40 +02:00
.loop:
2024-07-30 20:38:53 +02:00
add esi, sizeof.pcidev
2024-07-30 21:09:49 +02:00
push ebx esi
stdcall device_is_compat, esi
2024-07-05 23:27:38 +02:00
test eax, eax
2024-07-30 21:09:49 +02:00
jz .pop
stdcall nvme_init, esi
2024-07-05 23:27:38 +02:00
test eax, eax
2024-07-30 21:09:49 +02:00
jz .pop
pop esi ebx
stdcall add_nvme_disk, esi
jmp .next
.pop:
pop esi ebx
2024-07-11 22:19:42 +02:00
2024-07-30 21:09:49 +02:00
.next:
test eax, eax
setne [AnythingLoadedSuccessfully]
inc ebx
cmp ebx, dword [pcidevs_len]
jne .loop
cmp [AnythingLoadedSuccessfully], 0
jz .err
2024-07-05 23:27:38 +02:00
invoke RegService, my_service, service_proc
ret
.err:
2024-07-05 23:27:38 +02:00
call nvme_cleanup
2024-05-26 19:24:34 +02:00
xor eax, eax
ret
2024-03-28 23:43:18 +01:00
endp
proc service_proc stdcall, ioctl:dword
2024-06-26 00:37:04 +02:00
mov esi, [ioctl]
mov eax, [esi + IOCTL.io_code]
2024-05-26 19:24:34 +02:00
cmp eax, SRV_GETVERSION
2024-06-26 00:37:04 +02:00
jne .ret
2024-05-26 19:24:34 +02:00
2024-06-26 00:37:04 +02:00
mov eax, [esi + IOCTL.output]
cmp [esi + IOCTL.out_size], 4
jne .ret
2024-05-26 19:24:34 +02:00
mov dword [eax], API_VERSION
xor eax, eax
ret
2024-03-28 23:43:18 +01:00
2024-06-26 00:37:04 +02:00
.ret:
2024-07-05 23:27:38 +02:00
or eax, -1
ret
2024-03-28 23:43:18 +01:00
endp
2024-06-26 00:37:04 +02:00
proc add_nvme_disk stdcall, pci:dword
2024-07-05 23:27:38 +02:00
push esi
mov esi, [pci]
2024-07-20 23:20:32 +02:00
; NOTE: If the pcidev.num or pcidev.nsid is more than 9 then
; this fails to build the string correctly. Ignoring this issue
; for now since who has more than 9 NVMe SSDs on a desktop computer
; and a NSID bigger than 9 is also unlikely.
;
; Still, will address this problem in the future.
2024-07-05 23:27:38 +02:00
push 0 ; null terminator
2024-07-20 23:20:32 +02:00
movzx eax, byte [esi + pcidev.nsid]
add al, "0"
mov byte [esp], al
dec esp
mov byte [esp], "n"
dec esp
movzx eax, byte [esi + pcidev.num]
add al, "0"
mov byte [esp], al
2024-07-05 23:27:38 +02:00
push "nvme"
2024-07-20 23:20:32 +02:00
mov eax, esp
invoke DiskAdd, disk_functions, eax, [esi + pcidev.nsinfo], 0
add esp, 10
2024-07-05 23:27:38 +02:00
test eax, eax
jz @f
2024-07-11 22:19:42 +02:00
invoke DiskMediaChanged, eax, 1
2024-07-18 23:24:28 +02:00
DEBUGF DBG_INFO, "nvme%un%u: Successfully registered disk\n", [esi + pcidev.num], [esi + pcidev.nsid]
2024-07-05 23:27:38 +02:00
xor eax, eax
inc eax
pop esi
ret
2024-06-26 00:37:04 +02:00
@@:
DEBUGF DBG_INFO, "nvme%un%u: Failed to register disk\n", [esi + pcidev.num], [esi + pcidev.nsid]
2024-07-05 23:27:38 +02:00
xor eax, eax
pop esi
ret
2024-06-26 00:37:04 +02:00
endp
proc nvme_query_media stdcall, userdata:dword, info:dword
push ebx esi edi
2024-07-05 23:27:38 +02:00
mov esi, [userdata]
mov ebx, dword [esi + NSINFO.pci]
2024-07-05 23:27:38 +02:00
mov edi, [info]
mov dword [edi + DISKMEDIAINFO.flags], 0
2024-07-23 20:59:08 +02:00
mov cl, byte [esi + NSINFO.lbads]
xor eax, eax
inc eax
shl eax, cl
DEBUGF DBG_INFO, "nvme%un%u (Query Media): Sector size = %u\n", [ebx + pcidev.num], [esi + NSINFO.nsid], eax
2024-07-05 23:27:38 +02:00
mov dword [edi + DISKMEDIAINFO.sectorsize], eax
mov eax, dword [esi + NSINFO.capacity]
mov dword [edi + DISKMEDIAINFO.capacity], eax
mov eax, dword [esi + NSINFO.capacity + 4]
mov dword [edi + DISKMEDIAINFO.capacity + 4], eax
DEBUGF DBG_INFO, "nvme%un%u (Query Media): Capacity = %u + %u sectors\n", [ebx + pcidev.num], [esi + NSINFO.nsid], [esi + NSINFO.capacity], [esi + NSINFO.capacity + 4]
2024-07-05 23:27:38 +02:00
xor eax, eax
pop edi esi ebx
2024-07-05 23:27:38 +02:00
ret
2024-06-26 00:37:04 +02:00
endp
; returns 1 if the given NSID is a an active NSID, returns
; 0 otherwise
proc is_active_namespace stdcall, pci:dword, nsid:dword
2024-07-05 23:27:38 +02:00
push esi edi
invoke KernelAlloc, 0x1000
test eax, eax
jnz @f
pop edi esi
ret
@@:
2024-07-05 23:27:38 +02:00
mov esi, eax
invoke GetPhysAddr
stdcall nvme_identify, [pci], [nsid], eax, CNS_IDNS
test eax, eax
jz .not_active_nsid
2024-07-05 23:27:38 +02:00
xor ecx, ecx
@@:
2024-07-05 23:27:38 +02:00
mov eax, dword [esi + ecx * 4]
test eax, eax
jnz .is_active_nsid
inc ecx
cmp ecx, 0x1000 / 4
jne @b
.not_active_nsid:
2024-07-05 23:27:38 +02:00
invoke KernelFree, esi
pop edi esi
xor eax, eax
ret
.is_active_nsid:
2024-07-05 23:27:38 +02:00
invoke KernelFree, esi
pop edi esi
xor eax, eax
inc eax
ret
endp
; See page 248 of the NVMe 1.4 specification for reference
; Returns the number of namespaces that are active, note this
; doesn't mean if EAX = 5, then namespaces 1-5 will be active.
; This also sets [pci + pcidev.nn] and [pci + pcidev.nsids]
; to appropriate values
proc determine_active_nsids stdcall, pci:dword
2024-07-05 23:27:38 +02:00
push ebx esi
mov esi, [pci]
xor ebx, ebx
xor ecx, ecx
inc ecx
.loop:
2024-07-05 23:27:38 +02:00
cmp ecx, dword [esi + pcidev.nn]
ja .ret
push ecx
stdcall is_active_namespace, [pci], ecx
pop ecx
test eax, eax
jz .not_active_namespace
mov ebx, ecx
jmp .ret
.not_active_namespace:
2024-07-05 23:27:38 +02:00
inc ecx
jmp .loop
.ret:
2024-07-05 23:27:38 +02:00
pop edi esi
mov eax, ebx
ret
endp
2024-07-18 23:01:26 +02:00
proc build_prp_list stdcall, nprps:dword, buf:dword, prp_list_ptr:dword
2024-07-05 23:27:38 +02:00
push esi ebx edi
2024-07-18 23:01:26 +02:00
sub esp, 4
2024-07-18 23:01:26 +02:00
; stack:
; [esp]: virtual pointer to first PRP list
; here, we store the pointer to the very first
; PRP list so that free_prp_list can free the
; entire PRP list if something goes wrong, it
; also serves as our return value placeholder
mov dword [esp], 0
2024-07-05 23:27:38 +02:00
xor edi, edi
xor esi, esi
mov ecx, [nprps]
shl ecx, 3 ; multiply by 8 since each PRP pointer is a QWORD
; we'll store consecutive PRP list buffers here, for example
; given 2 PRP lists, we allocate 2 continuous pages
2024-07-18 23:01:26 +02:00
push ecx
2024-07-05 23:27:38 +02:00
invoke KernelAlloc, ecx ; store pointers to the PRP entries here
2024-07-18 23:01:26 +02:00
pop ecx
2024-07-05 23:27:38 +02:00
test eax, eax
jz .err
2024-07-18 23:01:26 +02:00
mov dword [esp], eax
2024-07-05 23:27:38 +02:00
mov edi, eax
2024-07-12 19:55:44 +02:00
mov eax, [prp_list_ptr]
mov dword [eax], edi
2024-07-18 23:01:26 +02:00
shr ecx, 1
stdcall memsetdz, edi, ecx
2024-07-18 23:01:26 +02:00
; note we assume buf is page-aligned
mov esi, [buf]
.build_prp_list:
; ensure we don't cross a page boundary
mov ebx, [nprps]
cmp ebx, PAGE_SIZE / 8
jb @f
mov ebx, PAGE_SIZE / 8
sub [nprps], ebx
@@:
2024-07-05 23:27:38 +02:00
xor ecx, ecx
2024-07-18 23:01:26 +02:00
cmp dword [esp], edi
je .loop
2024-07-04 20:42:06 +02:00
2024-07-18 23:01:26 +02:00
; we need to store the pointer of the next
; PRP list to the previous PRP list last entry
mov eax, edi
invoke GetPhysAddr
mov dword [edi - 8], eax
mov dword [edi - 4], 0
.loop:
2024-07-18 23:01:26 +02:00
mov eax, esi
invoke GetPhysAddr
2024-07-21 23:08:51 +02:00
mov dword [edi + ecx * 8], eax
mov dword [edi + ecx * 8 - 4], 0
2024-07-05 23:27:38 +02:00
add esi, PAGE_SIZE
inc ecx
cmp ecx, ebx
jne .loop
2024-07-18 23:01:26 +02:00
; check if we we need to build another PRP list
add edi, PAGE_SIZE
cmp ebx, PAGE_SIZE / 8
je .build_prp_list
2024-07-18 23:01:26 +02:00
; PRP list successfully created
mov eax, dword [esp]
invoke GetPhysAddr
2024-07-18 23:01:26 +02:00
add esp, 4
pop edi ebx esi
ret
2024-07-04 20:42:06 +02:00
.err:
2024-07-18 23:01:26 +02:00
add esp, 4
2024-07-05 23:27:38 +02:00
pop edi ebx esi
xor eax, eax
ret
endp
2024-07-12 19:55:44 +02:00
proc alloc_dptr stdcall, ns:dword, prps_ptr:dword, numsectors:dword, prp_list_ptr:dword, buf:dword
2024-07-23 20:10:42 +02:00
push ebx esi edi
mov esi, [ns]
2024-07-12 19:59:54 +02:00
mov edi, [prps_ptr]
mov eax, [buf]
invoke GetPhysAddr
2024-07-05 23:27:38 +02:00
mov dword [edi], eax
2024-07-23 20:10:42 +02:00
mov cl, byte [esi + NSINFO.lbads]
mov ebx, PAGE_SIZE
shr ebx, cl
mov edx, [numsectors]
; is the buffer offset portion equal to 0?
2024-07-23 20:10:42 +02:00
mov eax, [buf]
mov ecx, eax
and eax, PAGE_SIZE - 1
2024-08-01 19:49:18 +02:00
mov eax, ebx
jnz @f
; is the number of sectors less than or equal to one memory page?
2024-07-23 20:10:42 +02:00
cmp edx, ebx
jbe .success
2024-07-23 20:59:08 +02:00
shl ebx, 1 ; it is page aligned, so set ebx to 2 memory pages
@@:
2024-07-12 21:38:45 +02:00
; is the number of sectors greater than one or two memory pages?
2024-07-23 20:10:42 +02:00
cmp edx, ebx
2024-07-12 21:38:45 +02:00
ja .build_prp_list
2024-07-05 23:27:38 +02:00
; set PRP2
2024-07-23 20:10:42 +02:00
mov eax, ecx
2024-07-12 20:03:19 +02:00
and eax, not (PAGE_SIZE - 1)
add eax, PAGE_SIZE
invoke GetPhysAddr
2024-07-05 23:27:38 +02:00
mov dword [edi + 4], eax
jmp .success
2024-07-11 22:19:42 +02:00
.build_prp_list:
2024-08-01 19:49:18 +02:00
mov ebx, ecx
mov ecx, eax
2024-07-23 20:10:42 +02:00
and ebx, not (PAGE_SIZE - 1)
add ebx, PAGE_SIZE
2024-07-11 22:19:42 +02:00
mov eax, [numsectors]
2024-07-05 23:27:38 +02:00
xor edx, edx
div ecx
2024-07-23 20:10:42 +02:00
stdcall build_prp_list, eax, ebx, [prp_list_ptr]
2024-07-05 23:27:38 +02:00
test eax, eax
2024-07-11 22:19:42 +02:00
jz .err
2024-07-05 23:27:38 +02:00
mov dword [edi + 4], eax
2024-07-04 20:42:06 +02:00
.success:
2024-07-05 23:27:38 +02:00
xor eax, eax
inc eax
2024-07-23 20:10:42 +02:00
pop edi esi ebx
2024-07-05 23:27:38 +02:00
ret
2024-07-23 20:10:42 +02:00
.err:
xor eax, eax
pop edi esi ebx
ret
2024-07-01 23:10:36 +02:00
endp
nvme_read:
2024-07-05 23:27:38 +02:00
mov edx, NVM_CMD_READ
jmp nvme_readwrite
nvme_write:
2024-07-05 23:27:38 +02:00
mov edx, NVM_CMD_WRITE
2024-07-01 23:10:36 +02:00
proc nvme_readwrite stdcall, ns:dword, buf:dword, start_sector:qword, numsectors_ptr:dword
2024-07-08 20:49:43 +02:00
2024-07-05 23:27:38 +02:00
push ebx esi edi
2024-07-11 22:19:42 +02:00
sub esp, 20
2024-07-12 19:55:44 +02:00
2024-07-24 01:43:37 +02:00
; TODO: check if numsectors exceeds IDENTC.MDTS?
2024-07-12 19:55:44 +02:00
; stack:
; [esp] - PRP1
; [esp + 4] - PRP2
; [esp + 8] - command type (read or write)
; [esp + 12] - original numsectors value
; [esp + 16] - virtual pointer to PRP2 PRP list (if allocated, 0 if not)
mov ebx, esp
2024-07-18 23:24:28 +02:00
mov esi, [ns]
mov edi, [buf]
2024-07-18 23:24:28 +02:00
mov eax, [numsectors_ptr]
mov eax, dword [eax]
2024-08-09 19:51:52 +02:00
DEBUGF DBG_INFO, "buf: %x, start_sector: %u:%u, numsectors: %u\n", [buf], [start_sector + 4], [start_sector], eax
2024-07-23 20:14:43 +02:00
mov dword [ebx + 4], 0 ; PRP2 entry (0 by default)
2024-07-18 23:24:28 +02:00
mov dword [ebx + 8], edx ; command type (read or write)
mov dword [ebx + 12], eax ; save original numsectors value
2024-07-23 20:14:43 +02:00
mov dword [ebx + 16], 0 ; virtual pointer to PRP2 PRP list (not allocated by default)
2024-07-08 20:49:43 +02:00
2024-07-11 22:19:42 +02:00
mov ecx, ebx
add ecx, 16
2024-07-12 19:55:44 +02:00
2024-07-05 23:27:38 +02:00
; Note that [esp] will contain the value of PRP1 and [esp + 4] will
2024-07-12 19:55:44 +02:00
; contain the value of PRP2. If PRP2 is a PRP list, then [esp + 16] will point
; to the allocated PRP list (after this call, only if it completes successfully)
2024-07-18 23:24:28 +02:00
stdcall alloc_dptr, esi, ebx, eax, ecx, [buf]
2024-07-05 23:27:38 +02:00
test eax, eax
jz .fail
2024-07-12 19:55:44 +02:00
2024-08-09 19:51:52 +02:00
DEBUGF DBG_INFO, "PRP1: %x, PRP2: %x\n", [ebx], [ebx + 4]
mov eax, dword [start_sector]
; According to the NVMe specification, the NLB field in the I/O read and write
; commands is a 0-based value (i.e., 0 is equivalant to 1, 1 is equivalant to 2, ...)
; As far as I know, KolibriOS doesn't follow this mechanism so let's just decrement the
; value and it should have the same effect.
mov ecx, dword [ebx + 12]
dec ecx
; TODO: add non-blocking mechanisms later on
2024-07-30 20:38:53 +02:00
push eax
mov eax, dword [esi + NSINFO.pci]
mov dword [eax + pcidev.spinlock], 1
pop eax
2024-07-05 23:27:38 +02:00
stdcall nvme_io_rw, [esi + NSINFO.pci], \
1, \
[esi + NSINFO.nsid], \
dword [ebx], \
dword [ebx + 4], \
eax, \
dword [start_sector + 4], \
ecx, \
dword [ebx + 8]
2024-07-01 23:10:36 +02:00
2024-08-01 19:49:18 +02:00
stdcall nvme_poll, [esi + NSINFO.pci]
test eax, eax
jz .fail
; free PRP list (if allocated)
2024-08-01 19:49:18 +02:00
mov eax, dword [ebx + 16]
test eax, eax
jz @f
2024-08-01 19:49:18 +02:00
invoke KernelFree, eax
@@:
xor eax, eax
add esp, 20
pop edi esi ebx
ret
2024-07-08 22:30:29 +02:00
.fail:
2024-08-01 19:49:18 +02:00
; free PRP list (if allocated)
mov eax, dword [ebx + 16]
test eax, eax
jz @f
invoke KernelFree, eax
@@:
mov ebx, [numsectors_ptr]
mov dword [ebx], 0
add esp, 20
pop edi esi ebx
or eax, -1 ; generic disk error
ret
2024-07-01 23:10:36 +02:00
endp
proc detect_nvme
2024-05-26 19:24:34 +02:00
invoke GetPCIList
mov esi, eax
mov ebx, eax
2024-03-28 23:43:18 +01:00
.check_dev:
mov eax, dword [esi + PCIDEV.class]
and eax, 0x00ffff00 ; retrieve class/subclass code only
cmp eax, 0x00010800 ; Mass Storage Controller - Non-Volatile Memory Controller
2024-07-05 23:27:38 +02:00
je .found_dev
2024-03-28 23:43:18 +01:00
.next_dev:
mov esi, dword [esi + PCIDEV.fd]
cmp esi, ebx
2024-07-05 23:27:38 +02:00
jne .check_dev
.exit_success:
xor eax, eax
inc eax
ret
.found_dev:
; skip PCIDEV.owner check if the PCI device pointer has already been
; allocated (without this check, more than 1 NVMe device cannot be
; registered)
mov eax, dword [p_nvme_devices]
test eax, eax
jnz @f
cmp dword [esi + PCIDEV.owner], 0
jnz .err
@@:
PDEBUGF DBG_INFO, "PCI(%u.%u.%u): Detected NVMe device...\n", [esi + PCIDEV.bus], [esi + PCIDEV.devfn]
2024-07-05 23:27:38 +02:00
cmp dword [pcidevs_len], TOTAL_PCIDEVS
jne @f
DEBUGF DBG_INFO, "Can't add any more NVMe devices...\n"
2024-07-05 23:27:38 +02:00
jmp .exit_success
@@:
2024-07-05 23:27:38 +02:00
inc dword [pcidevs_len]
cmp dword [p_nvme_devices], 0
jnz @f ; was the pointer already allocated?
invoke KernelAlloc, sizeof.pcidev * TOTAL_PCIDEVS
2024-07-05 23:27:38 +02:00
test eax, eax
jz .err
2024-07-05 23:27:38 +02:00
mov dword [p_nvme_devices], eax
mov dword [esi + PCIDEV.owner], eax
@@:
2024-07-05 23:27:38 +02:00
mov ecx, dword [pcidevs_len]
dec ecx
mov edi, dword [p_nvme_devices]
2024-07-30 20:38:53 +02:00
mov edx, ecx
imul edx, sizeof.pcidev
lea edi, [edi + edx]
2024-05-26 00:56:58 +02:00
movzx eax, byte [esi + PCIDEV.bus]
mov byte [edi + pcidev.bus], al
movzx eax, byte [esi + PCIDEV.devfn]
mov byte [edi + pcidev.devfn], al
mov dword [edi + pcidev.num], ecx
2024-05-26 00:56:58 +02:00
2024-07-05 23:27:38 +02:00
jmp .next_dev
2024-04-28 02:11:57 +02:00
.err:
2024-07-05 23:27:38 +02:00
xor eax, eax
ret
endp
2024-04-29 03:06:13 +02:00
proc device_is_compat stdcall, pci:dword
2024-07-05 23:27:38 +02:00
push esi edx ecx
mov esi, [pci]
invoke PciRead8, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], PCI_header00.interrupt_line
mov byte [esi + pcidev.iline], al
invoke PciRead32, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], PCI_header00.base_addr_0
and eax, 0xfffffff0
test eax, eax
jz .failure
mov edx, eax
invoke MapIoMem, eax, 0x2000, PG_SW+PG_NOCACHE
2024-07-05 23:27:38 +02:00
test eax, eax
jz .failure
mov dword [esi + pcidev.io_addr], eax
mov eax, dword [eax + NVME_MMIO.CAP + 4]
and eax, CAP_DSTRD
mov byte [esi + pcidev.dstrd], al
mov eax, dword [esi + pcidev.io_addr]
2024-07-05 23:27:38 +02:00
mov eax, dword [eax + NVME_MMIO.VS]
DEBUGF DBG_INFO, "nvme%u: Controller version: 0x%x\n", [esi + pcidev.num], eax
2024-07-05 23:27:38 +02:00
mov dword [esi + pcidev.version], eax
pop ecx edx esi
xor eax, eax
inc eax
ret
2024-04-29 03:06:13 +02:00
.failure:
2024-07-05 23:27:38 +02:00
PDEBUGF DBG_INFO, "PCI(%u.%u.%u): something went wrong checking NVMe device compatibility\n", byte [esi + pcidev.bus], byte [esi + pcidev.devfn]
pop ecx edx esi
xor eax, eax
ret
endp
; nvme_init: Initializes the NVMe controller
2024-04-29 03:06:13 +02:00
proc nvme_init stdcall, pci:dword
2024-07-05 23:27:38 +02:00
push ebx esi edi
mov esi, dword [pci]
; Check the PCI header to see if interrupts are disabled, if so
; we have to re-enable them
invoke PciRead16, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], PCI_header00.command
and eax, not (1 shl 10)
; Enable Bus Master bit, memory space access, and I/O space access. QEMU automatically sets the
; bus master bit, but Virtualbox does not. Not sure about the other bits though, but let's set them
; to 1 to anyway just to be extra cautious.
; See: https://git.kolibrios.org/GSoC/kolibrios-nvme-driver/issues/1#issuecomment-467
or eax, (1 shl 2) or (1 shl 1) or 1
invoke PciWrite16, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], PCI_header00.command, eax
2024-07-26 20:42:46 +02:00
; Check if the device has a pointer to the capabilities list (status register bit 4 set to 1)
; though this check is probably unnecessary since all PCIe devices should have this bit set to 1
invoke PciRead16, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], PCI_header00.status
test ax, (1 shl 4)
jz .exit_fail
invoke PciRead8, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], PCI_header00.cap_ptr
and eax, 0xfc ; bottom two bits are reserved, so mask them before we access the configuration space
mov edi, eax
2024-07-27 18:16:56 +02:00
DEBUGF DBG_INFO, "nvme%u: Checking capabilities...\n", [esi + pcidev.num]
2024-07-26 20:42:46 +02:00
; We need to check if there are any MSI/MSI-X capabilities, and if so, make sure they're disabled since
; we're using old fashioned pin-based interrupts (for now)
.read_cap:
invoke PciRead32, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], edi
add edi, 2
cmp al, MSICAP_CID
je .got_msi_cap
cmp al, MSIXCAP_CID
je .got_msix_cap
movzx edi, ah
test edi, edi
jnz .read_cap
2024-07-27 18:16:56 +02:00
DEBUGF DBG_INFO, "nvme%u: MSI/MSI-X capability not found\n", [esi + pcidev.num]
2024-07-26 20:42:46 +02:00
jmp .end_cap_parse
.got_msi_cap:
DEBUGF DBG_INFO, "nvme%u: Found MSI capability\n", [esi + pcidev.num]
invoke PciRead32, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], edi
and eax, not MSICAP_MSIE
invoke PciWrite32, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], edi
jmp .end_cap_parse
.got_msix_cap:
DEBUGF DBG_INFO, "nvme%u: Found MSI-X capability\n", [esi + pcidev.num]
invoke PciRead32, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], edi
and eax, not MSIXCAP_MXE
invoke PciWrite32, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], edi
.end_cap_parse:
2024-07-05 23:27:38 +02:00
mov edi, dword [esi + pcidev.io_addr]
; check maximum queue entries supported
2024-08-05 18:49:28 +02:00
mov eax, dword [edi + NVME_MMIO.CAP]
2024-08-09 19:51:52 +02:00
DEBUGF DBG_INFO, "nvme%u: Maximum queue entries available is %u (required: %u)\n", [esi + pcidev.num], ax, SQ_ENTRIES
2024-07-05 23:27:38 +02:00
cmp ax, SQ_ENTRIES
jb .exit_fail
2024-08-05 18:49:28 +02:00
if __DEBUG__
test eax, CAP_CQR
setnz al
DEBUGF DBG_INFO, "nvme%u: Contiguous queues required: %u\n", [esi + pcidev.num], al
end if
2024-07-05 23:27:38 +02:00
2024-08-06 18:46:40 +02:00
; Check if NVM command set is supported
2024-07-05 23:27:38 +02:00
mov eax, dword [edi + NVME_MMIO.CAP + 4]
2024-08-09 19:51:52 +02:00
DEBUGF DBG_INFO, "nvme%u: Checking if NVM command set is supported...\n", [esi + pcidev.num]
2024-07-05 23:27:38 +02:00
test eax, CAP_CSS_NVM_CMDSET
jz .exit_fail
2024-07-27 18:16:56 +02:00
DEBUGF DBG_INFO, "nvme%u: OK... NVM command set supported\n", [esi + pcidev.num]
2024-07-05 23:27:38 +02:00
stdcall nvme_disable_ctrl, esi
2024-08-09 19:51:52 +02:00
DEBUGF DBG_INFO, "nvme%u: Checking if memory page size is supported...\n", [esi + pcidev.num]
2024-07-05 23:27:38 +02:00
mov eax, dword [edi + NVME_MMIO.CAP + 4]
2024-08-06 18:46:40 +02:00
mov edx, eax
and edx, CAP_MPSMIN
shr edx, 16
cmp edx, NVM_MPS
2024-07-05 23:27:38 +02:00
ja .exit_fail
and eax, CAP_MPSMAX
shr eax, 20
cmp eax, NVM_MPS
jb .exit_fail
2024-07-27 18:16:56 +02:00
DEBUGF DBG_INFO, "nvme%u: OK... memory page size supported\n", [esi + pcidev.num]
2024-07-05 23:27:38 +02:00
; Configure IOSQES, IOCQES, AMS, MPS, CSS
; CSS = 0 (NVM Command Set)
; AMS = 0 (Round Robin)
; MPS = 0 (4KiB Pages)
; IOSQES = 6 (64B)
; IOCQES = 4 (16B)
xor eax, eax
2024-07-05 23:27:38 +02:00
or eax, (4 shl 20) or (6 shl 16)
mov dword [edi + NVME_MMIO.CC], eax
2024-08-09 19:51:52 +02:00
DEBUGF DBG_INFO, "nvme%u: OK... controller is configured to appropriate settings\n", [esi + pcidev.num]
2024-07-05 23:27:38 +02:00
; Configure Admin Queue Attributes
xor eax, eax
2024-07-05 23:27:38 +02:00
or eax, NVM_ASQS or (NVM_ACQS shl 16)
mov dword [edi + NVME_MMIO.AQA], eax
2024-07-27 18:16:56 +02:00
DEBUGF DBG_INFO, "nvme%u: Admin queue attributes: 0x%x\n", [esi + pcidev.num], eax
2024-07-05 23:27:38 +02:00
; Allocate list of queues
2024-08-09 19:51:52 +02:00
DEBUGF DBG_INFO, "nvme%u: Allocating Administrator and I/O queues...\n",, [esi + pcidev.num]
2024-07-05 23:27:38 +02:00
invoke KernelAlloc, sizeof.NVM_QUEUE_ENTRY * (LAST_QUEUE_ID + 1)
test eax, eax
jz .exit_fail
mov dword [esi + pcidev.queue_entries], eax
mov edi, eax
2024-07-08 21:49:43 +02:00
stdcall memsetdz, eax, sizeof.NVM_QUEUE_ENTRY * (LAST_QUEUE_ID + 1) / 4
2024-07-05 23:27:38 +02:00
; Allocate submission/completion queue pointers
2024-08-05 18:49:28 +02:00
xor ebx, ebx
2024-07-30 23:22:28 +02:00
.init_queues:
invoke KernelAlloc, QUEUE_ALLOC_SIZE
2024-07-05 23:27:38 +02:00
test eax, eax
jz .exit_fail
2024-08-05 18:49:28 +02:00
mov dword [edi + ebx + NVM_QUEUE_ENTRY.cq_ptr], eax
2024-08-05 22:17:54 +02:00
mov edx, eax
add eax, CQ_ALLOC_SIZE
2024-08-05 22:17:54 +02:00
mov dword [edi + ebx + NVM_QUEUE_ENTRY.sq_ptr], eax
stdcall memsetdz, edx, QUEUE_ALLOC_SIZE / 4
2024-07-30 23:22:28 +02:00
; Initialize command entries
invoke KernelAlloc, sizeof.NVMQCMD * CQ_ENTRIES
test eax, eax
jz .exit_fail
2024-08-05 18:49:28 +02:00
mov dword [edi + ebx + NVM_QUEUE_ENTRY.cmd_ptr], eax
2024-07-30 23:22:28 +02:00
mov esi, eax
2024-08-05 18:49:28 +02:00
push ebx
xor ebx, ebx
2024-07-30 23:22:28 +02:00
.init_cmd_entries:
invoke KernelAlloc, sizeof.MUTEX
test eax, eax
2024-08-05 22:17:54 +02:00
jz .exit_fail_cleanup
mov dword [esi + NVMQCMD.mutex_ptr], eax
mov dword [esi + NVMQCMD.cid], ebx
2024-07-30 23:22:28 +02:00
mov ecx, eax
invoke MutexInit
inc ebx
add esi, sizeof.NVMQCMD
cmp ebx, CQ_ENTRIES
2024-07-30 23:22:28 +02:00
jne .init_cmd_entries
2024-08-05 18:49:28 +02:00
pop ebx
add ebx, sizeof.NVM_QUEUE_ENTRY
cmp ebx, (LAST_QUEUE_ID + 1) * sizeof.NVM_QUEUE_ENTRY
2024-07-30 23:22:28 +02:00
jne .init_queues
2024-07-05 23:27:38 +02:00
; Configure Admin Completion Queue Base Address
2024-07-05 23:27:38 +02:00
mov esi, [pci]
mov esi, dword [esi + pcidev.io_addr]
mov eax, dword [edi + NVM_QUEUE_ENTRY.cq_ptr]
invoke GetPhysAddr
mov dword [esi + NVME_MMIO.ACQ], eax
mov dword [esi + NVME_MMIO.ACQ + 4], 0
if __DEBUG__
push esi
mov esi, [pci]
DEBUGF DBG_INFO, "nvme%u: Admin completion queue base address: 0x%x\n", [esi + pcidev.num], eax
pop esi
end if
; Configure Admin Submission Queue Base Address
mov eax, dword [edi + NVM_QUEUE_ENTRY.sq_ptr]
invoke GetPhysAddr
mov dword [esi + NVME_MMIO.ASQ], eax
mov dword [esi + NVME_MMIO.ASQ + 4], 0
if __DEBUG__
push esi
mov esi, [pci]
DEBUGF DBG_INFO, "nvme%u: Admin submission queue base address: 0x%x\n", [esi + pcidev.num], eax
pop esi
end if
2024-07-05 23:27:38 +02:00
; Attach interrupt handler
mov esi, [pci]
movzx eax, byte [esi + pcidev.iline]
2024-07-27 18:16:56 +02:00
DEBUGF DBG_INFO, "nvme%u: Attaching interrupt handler to IRQ %u\n", [esi + pcidev.num], eax
2024-07-05 23:27:38 +02:00
invoke AttachIntHandler, eax, irq_handler, 0
test eax, eax
jz .exit_fail
2024-07-27 18:16:56 +02:00
DEBUGF DBG_INFO, "nvme%u: Successfully attached interrupt handler\n", [esi + pcidev.num]
2024-07-05 23:27:38 +02:00
; Restart the controller
stdcall nvme_enable_ctrl, esi
2024-07-05 23:27:38 +02:00
invoke KernelAlloc, 0x1000
test eax, eax
jz .exit_fail
mov edi, eax
invoke GetPhysAddr
; pci:dword, nsid:dword, dptr:dword, cns:byte
stdcall nvme_identify, [pci], 0, eax, CNS_IDCS
test eax, eax
jz .exit_fail
2024-07-05 23:27:38 +02:00
mov eax, dword [edi + IDENTC.nn]
mov dword [esi + pcidev.nn], eax
DEBUGF DBG_INFO, "nvme%u: Namespace Count: %u\n", [esi + pcidev.num], eax
; Note that the specification only allows ASCII strings that contain code
; values between 0x20 (' ') and 0x7E ('~'). Strings are left justified and
; padded with spaces (at least according to the 1.4.0 spec) which means there
; is no null terminator anywhere. To prevent garbage or repeated values from
; being printed to the debug log, I have inserted a 0 byte at the end of each
; string.
2024-07-05 23:27:38 +02:00
lea ebx, byte [edi + IDENTC.sn]
mov byte [ebx + 19], 0
DEBUGF DBG_INFO, "nvme%u: Serial Number: %s\n", [esi + pcidev.num], ebx
2024-07-05 23:27:38 +02:00
add ebx, 20
mov byte [ebx + 39], 0
DEBUGF DBG_INFO, "nvme%u: Model Number: %s\n", [esi + pcidev.num], ebx
add ebx, 40
mov byte [ebx + 7], 0
DEBUGF DBG_INFO, "nvme%u: Firmware Revision: %s\n", [esi + pcidev.num], ebx
2024-07-05 23:27:38 +02:00
mov edx, dword [esi + pcidev.version]
cmp edx, VS140
jb @f
; This is a reserved field in pre-1.4 controllers
mov al, byte [edi + IDENTC.cntrltype]
cmp al, CNTRLTYPE_IO_CONTROLLER
jne .exit_fail
;DEBUGF DBG_INFO, "nvme%u: I/O controller detected...\n", [esi + pcidev.num]
2024-06-13 00:03:01 +02:00
@@:
2024-07-05 23:27:38 +02:00
; TODO: check IDENTC.AVSCC
mov al, byte [edi + IDENTC.sqes]
and al, 11110000b
2024-07-27 18:16:56 +02:00
DEBUGF DBG_INFO, "nvme%u: IDENTC.SQES = %u\n", [esi + pcidev.num], al
2024-07-05 23:27:38 +02:00
cmp al, 0x60 ; maximum submission queue size should at least be 64 bytes
jb .exit_fail
mov al, byte [edi + IDENTC.cqes]
and al, 11110000b
2024-07-27 18:16:56 +02:00
DEBUGF DBG_INFO, "nvme%u: IDENTC.CQES = %u\n", [esi + pcidev.num], al
2024-07-05 23:27:38 +02:00
and al, 0x40 ; maximum completion queue entry size should at least be 16 bytes
jb .exit_fail
invoke KernelFree, edi
mov eax, 1 or (1 shl 16) ; CDW11 (set the number of queues we want)
2024-07-30 20:38:53 +02:00
mov esi, [pci]
mov dword [esi + pcidev.spinlock], 1
2024-07-05 23:27:38 +02:00
stdcall set_features, [pci], NULLPTR, FID_NUMBER_OF_QUEUES, eax
2024-07-30 20:38:53 +02:00
stdcall nvme_poll, esi
test eax, eax
jz .exit_fail
2024-07-05 23:27:38 +02:00
mov esi, dword [esi + pcidev.queue_entries]
mov esi, dword [esi + NVM_QUEUE_ENTRY.cq_ptr]
mov eax, dword [esi + sizeof.CQ_ENTRY + CQ_ENTRY.cdw0]
;DEBUGF DBG_INFO, "nvme%u: Set Features CDW0: 0x%x\n", [esi + pcidev.num], eax
2024-07-05 23:27:38 +02:00
test ax, ax ; Number of I/O Submission Queues allocated
jz .exit_fail
shl eax, 16
test ax, ax ; Number of I/O Completion Queues allocated
jnz .exit_fail
; Create I/O Queues
; (TODO: create N queue pairs for N CPU cores, see page 8 of NVMe 1.4 spec for an explaination)
2024-07-05 23:27:38 +02:00
mov esi, [pci]
mov edi, esi
mov esi, dword [esi + pcidev.queue_entries]
2024-08-01 18:02:07 +02:00
add esi, sizeof.NVM_QUEUE_ENTRY
2024-07-05 23:27:38 +02:00
mov eax, dword [esi + NVM_QUEUE_ENTRY.cq_ptr]
invoke GetPhysAddr
stdcall create_io_completion_queue, [pci], eax, 1, IEN_ON
test eax, eax
jz .exit_fail
;DEBUGF DBG_INFO, "nvme%u: Successfully created I/O completion queue 1\n", [edi + pcidev.num]
2024-07-05 23:27:38 +02:00
mov eax, dword [esi + NVM_QUEUE_ENTRY.sq_ptr]
invoke GetPhysAddr
stdcall create_io_submission_queue, [pci], eax, 1, 1
jz .exit_fail
;DEBUGF DBG_INFO, "nvme%u: Successfully created I/O submission queue 1\n", [edi + pcidev.num]
2024-07-05 23:27:38 +02:00
stdcall determine_active_nsids, [pci]
test eax, eax
jz .exit_fail ; No active NSIDS
mov esi, [pci]
mov dword [esi + pcidev.nsid], eax
DEBUGF DBG_INFO, "nvme%u: Found active NSID: %u\n", [esi + pcidev.num], eax
2024-07-05 23:27:38 +02:00
invoke KernelAlloc, 0x1000
test eax, eax
jz .exit_fail
mov edi, eax
invoke GetPhysAddr
stdcall nvme_identify, [pci], [esi + pcidev.nsid], eax, CNS_IDNS
test eax, eax
jz .exit_fail
2024-07-05 23:27:38 +02:00
invoke KernelAlloc, sizeof.NSINFO
test eax, eax
jz .exit_fail
mov ebx, eax
mov dword [esi + pcidev.nsinfo], eax
mov al, byte [edi + IDENTN.nsfeat]
mov byte [ebx + NSINFO.features], al
;DEBUGF DBG_INFO, "nvme%un%u: Namespace Features: 0x%x\n", [esi + pcidev.num], [esi + pcidev.nsid], al
mov eax, dword [esi + pcidev.nsid]
mov dword [ebx + NSINFO.nsid], eax
2024-07-05 23:27:38 +02:00
mov dword [ebx + NSINFO.pci], esi
mov eax, dword [edi + IDENTN.nsze]
mov dword [ebx + NSINFO.size], eax
mov eax, dword [edi + IDENTN.nsze + 4]
mov dword [ebx + NSINFO.size + 4], eax
mov eax, dword [edi + IDENTN.ncap]
mov dword [ebx + NSINFO.capacity], eax
mov eax, dword [edi + IDENTN.ncap + 4]
mov dword [ebx + NSINFO.capacity + 4], eax
;DEBUGF DBG_INFO, "nvme%un%u: Namespace Size: %u + %u logical blocks\n", [esi + pcidev.num], [esi + pcidev.nsid], [edi + IDENTN.nsze], [edi + IDENTN.nsze + 4]
;DEBUGF DBG_INFO, "nvme%un%u: Namespace Capacity: %u + %u logical blocks\n", [esi + pcidev.num], [esi + pcidev.nsid], [edi + IDENTN.ncap], [edi + IDENTN.ncap + 4]
2024-07-23 20:59:08 +02:00
mov eax, dword [edi + IDENTN.lbaf0]
shr eax, 16 ; Get LBADS
; KolibriOS only supports a LBADS of 512, so if it's a higher value then we
; have to ignore this namespace
2024-07-23 20:59:08 +02:00
cmp al, SUPPORTED_LBADS
jne .exit_fail
2024-07-23 20:59:08 +02:00
mov byte [ebx + NSINFO.lbads], al
2024-07-05 23:27:38 +02:00
invoke KernelFree, edi
if 0
2024-07-21 23:08:51 +02:00
invoke KernelAlloc, 0x6000
test eax, eax
jz .exit_fail
mov edi, eax
invoke KernelAlloc, 0x8
test eax, eax
jz .exit_fail
mov edx, NVM_CMD_READ
2024-08-01 19:49:18 +02:00
mov dword [eax], 6
add edi, 0x5
2024-07-30 20:38:53 +02:00
mov dword [esi + pcidev.spinlock], 1
2024-08-01 19:49:18 +02:00
stdcall nvme_readwrite, [esi + pcidev.nsinfo], edi, 0x0, 0, eax
2024-07-30 20:38:53 +02:00
stdcall nvme_poll, esi
test eax, eax
jz .exit_fail
DEBUGF DBG_INFO, "STRING: %s\n", edi
2024-07-21 23:08:51 +02:00
add edi, 0x2000
DEBUGF DBG_INFO, "STRING: %s\n", edi
2024-07-11 22:19:42 +02:00
end if
2024-07-05 23:27:38 +02:00
DEBUGF DBG_INFO, "nvme%u: Successfully initialized driver\n", [esi + pcidev.num]
2024-05-26 19:24:34 +02:00
xor eax, eax
inc eax
2024-07-05 23:27:38 +02:00
pop edi esi ebx
2024-05-26 19:24:34 +02:00
ret
2024-03-31 21:43:38 +02:00
2024-08-05 22:17:54 +02:00
.exit_fail_cleanup:
pop eax
2024-04-02 01:47:14 +02:00
.exit_fail:
2024-07-05 23:27:38 +02:00
mov esi, [pci]
2024-07-27 18:16:56 +02:00
DEBUGF DBG_INFO, "nvme%u: Failed to initialize controller\n", [esi + pcidev.num]
mov edi, dword [esi + pcidev.io_addr]
mov eax, dword [edi + NVME_MMIO.CSTS]
test eax, CSTS_CFS
jz @f
DEBUGF DBG_INFO, "nvme%u: A fatal controller error has occurred\n", [esi + pcidev.num]
@@:
2024-07-05 23:27:38 +02:00
xor eax, eax
pop edi esi ebx
ret
2024-03-31 21:43:38 +02:00
endp
2024-06-12 21:47:32 +02:00
proc get_new_cid stdcall, pci:dword, y:dword

; Return a fresh command identifier (CID) for queue 'y'.
; The CID handed out is simply the queue's current cached head index.
; In:  pci = pointer to this controller's pcidev
;      y   = queue index
; Out: eax = CID (zero-extended 16-bit head value)
; Clobbers: ecx
        mov     ecx, [y]
        shl     ecx, SIZEOF_NVM_QUEUE_ENTRY     ; byte offset of queue 'y' bookkeeping entry
        mov     eax, [pci]
        mov     eax, dword [eax + pcidev.queue_entries]
        movzx   eax, word [eax + ecx + NVM_QUEUE_ENTRY.head]
        DEBUGF  DBG_INFO, "get_new_cid: %u\n", eax
        ret

endp
proc nvme_disable_ctrl stdcall, pci:dword

; Bring the controller to an idle state: clear CC.EN, then spin until
; the controller acknowledges by clearing CSTS.RDY.
; TODO: Add timeout of CAP.TO seconds
        push    esi edi
        mov     esi, [pci]
        DEBUGF  DBG_INFO, "nvme%u: Disabling Controller...\n", [esi + pcidev.num]
        mov     edi, dword [esi + pcidev.io_addr]
        and     dword [edi + NVME_MMIO.CC], not 1       ; CC.EN = 0
.spin:
        ; CSTS.RDY is cleared to 0 once the controller reaches idle
        test    dword [edi + NVME_MMIO.CSTS], CSTS_RDY
        jnz     .spin
        DEBUGF  DBG_INFO, "nvme%u: Successfully disabled controller\n", [esi + pcidev.num]
        pop     edi esi
        ret

endp
proc nvme_enable_ctrl stdcall, pci:dword

; Start the controller: set CC.EN, then spin until the controller
; acknowledges by setting CSTS.RDY.
; TODO: Add timeout of CAP.TO seconds
        push    esi edi
        mov     esi, [pci]
        DEBUGF  DBG_INFO, "nvme%u: Enabling Controller...\n", [esi + pcidev.num]
        mov     edi, dword [esi + pcidev.io_addr]
        or      dword [edi + NVME_MMIO.CC], 1           ; CC.EN = 1
.spin:
        ; CSTS.RDY is set to 1 once the controller is active
        test    dword [edi + NVME_MMIO.CSTS], CSTS_RDY
        jz      .spin
        DEBUGF  DBG_INFO, "nvme%u: Successfully enabled controller\n", [esi + pcidev.num]
        pop     edi esi
        ret

endp
2024-07-30 20:38:53 +02:00
proc nvme_poll stdcall, pci:dword

; Busy-wait until the device's spinlock is released (by the IRQ handler
; on command completion), re-acquiring it in the process via xchg.
; In:  pci = pointer to this controller's pcidev
; Out: eax = 1 if the lock was released in time, eax = 0 on timeout
; Clobbers: ecx
        push    esi
        mov     esi, [pci]
        xor     ecx, ecx                        ; ecx = iteration counter
.spin:
        inc     ecx
        cmp     ecx, 0x10000000                 ; arbitrary bail-out bound
        je      .timeout
        mov     eax, 1
        xchg    eax, dword [esi + pcidev.spinlock]
        test    eax, eax                        ; old value 0 => lock was free
        jnz     .spin
        ; lock was released: report success
        pop     esi
        mov     eax, 1
        ret
.timeout:
        ; lock was never released: report failure
        pop     esi
        xor     eax, eax
        ret

endp
2024-06-09 19:07:01 +02:00
; Writes to completion queue 'y' head doorbell
; In:  pci = pointer to this controller's pcidev
;      y   = completion queue index
;      cqh = new CQ head index to publish to the controller
; Also stores the new head in the queue's NVM_QUEUE_ENTRY and unlocks the
; mutex of the command slot selected by cqh.
; Clobbers: eax, ecx, edx
; NOTE(review): the bare timestamp lines below look like blame-export
; artifacts, not fasm source - confirm against the upstream file.
proc cqyhdbl_write stdcall, pci:dword, y:dword, cqh:dword
2024-06-09 19:07:01 +02:00
2024-07-05 23:27:38 +02:00
push esi edi
mov esi, [pci]
; 1000h + ((2y + 1) * (4 << CAP.DSTRD))
mov eax, [y]
shl al, 1 ; al = 2y (queue index is small, fits in 8 bits)
inc al ; al = 2y + 1
mov edx, 4
mov cl, byte [esi + pcidev.dstrd]
shl dx, cl ; dx = 4 << CAP.DSTRD (doorbell stride in bytes)
imul dx, ax ; dx = (2y + 1) * stride
add dx, 0x1000 ; dx = CQyHDBL register offset
mov ecx, [y]
shl ecx, SIZEOF_NVM_QUEUE_ENTRY ; byte offset of queue 'y' bookkeeping entry
2024-07-05 23:27:38 +02:00
mov edi, dword [esi + pcidev.queue_entries]
lea edi, dword [edi + ecx] ; edi -> NVM_QUEUE_ENTRY for queue 'y'
mov eax, [cqh]
2024-08-09 19:51:52 +02:00
DEBUGF DBG_INFO, "nvme%u: Writing to CQ%u doorbell register 0x%x: %u\n", [esi + pcidev.num], [y], dx, ax
2024-08-01 19:49:18 +02:00
mov esi, dword [esi + pcidev.io_addr]
2024-07-05 23:27:38 +02:00
mov word [esi + edx], ax ; Write to CQyHDBL
mov word [edi + NVM_QUEUE_ENTRY.head], ax ; cache the new head locally
; Unlock the mutex now that the command is complete
mov edi, dword [edi + NVM_QUEUE_ENTRY.cmd_ptr]
mov ecx, [cqh]
shl ecx, SIZEOF_NVMQCMD ; byte offset of the NVMQCMD slot indexed by cqh
add edi, ecx
mov ecx, dword [edi + NVMQCMD.mutex_ptr]
invoke MutexUnlock
2024-07-05 23:27:38 +02:00
pop edi esi
ret
2024-06-09 19:07:01 +02:00
endp
; Writes to submission queue 'y' tail doorbell
; In:  pci = pointer to this controller's pcidev
;      y   = submission queue index
;      cmd = pointer to the SQ_ENTRY to submit
; Copies the command into the queue's SQ ring at its CID slot, locks the
; command's mutex, advances the cached tail (with wrap at NVM_ASQS), and
; rings the SQ tail doorbell.
; Clobbers: eax, ecx, edx
; NOTE(review): the bare timestamp lines below look like blame-export
; artifacts, not fasm source - confirm against the upstream file.
proc sqytdbl_write stdcall, pci:dword, y:word, cmd:dword
2024-07-05 23:27:38 +02:00
push ebx esi edi
mov edi, [pci]
mov edi, dword [edi + pcidev.queue_entries]
2024-08-01 18:02:07 +02:00
movzx ebx, [y]
shl ebx, SIZEOF_NVM_QUEUE_ENTRY ; byte offset of queue 'y' bookkeeping entry
2024-08-01 18:02:07 +02:00
lea edi, [edi + ebx] ; edi -> NVM_QUEUE_ENTRY for queue 'y'
mov eax, dword [edi + NVM_QUEUE_ENTRY.cmd_ptr]
2024-08-01 18:02:07 +02:00
mov edx, dword [edi + NVM_QUEUE_ENTRY.sq_ptr]
2024-07-05 23:27:38 +02:00
mov esi, [cmd]
mov ecx, dword [esi + SQ_ENTRY.cdw0]
shr ecx, 16 ; Get CID
2024-08-01 18:18:37 +02:00
mov ebx, ecx
shl ebx, SIZEOF_NVM_QUEUE_ENTRY ; NOTE(review): cqyhdbl_write indexes cmd_ptr with SIZEOF_NVMQCMD - confirm the two shift constants agree
2024-08-01 18:18:37 +02:00
add ebx, eax ; ebx -> NVMQCMD slot for this CID
shl ecx, SIZEOF_SQ_ENTRY ; byte offset of the SQ ring slot for this CID
2024-08-01 18:02:07 +02:00
lea edx, [edx + ecx]
stdcall memcpyd, edx, esi, sizeof.SQ_ENTRY / 4 ; copy the command into the SQ ring
2024-08-01 18:18:37 +02:00
mov ecx, dword [ebx + NVMQCMD.mutex_ptr]
invoke MutexLock ; held until cqyhdbl_write unlocks it on completion
2024-07-05 23:27:38 +02:00
2024-08-01 18:02:07 +02:00
mov esi, [pci]
2024-08-10 19:05:28 +02:00
mov ax, word [edi + NVM_QUEUE_ENTRY.tail]
inc ax
cmp ax, NVM_ASQS
jb @f
2024-07-05 23:27:38 +02:00
xor ax, ax ; wrap tail back to slot 0
@@:
2024-07-05 23:27:38 +02:00
; 1000h + (2y * (4 << CAP.DSTRD))
movzx ebx, [y]
shl ebx, 1 ; ebx = 2y
mov edx, 4
mov cl, byte [esi + pcidev.dstrd]
shl edx, cl ; edx = 4 << CAP.DSTRD (doorbell stride in bytes)
imul edx, ebx
add edx, 0x1000 ; edx = SQyTDBL register offset
2024-08-09 19:51:52 +02:00
DEBUGF DBG_INFO, "nvme%u: Writing to SQ%u doorbell register 0x%x: %u\n", [esi + pcidev.num], [y], dx, ax
2024-08-01 18:02:07 +02:00
mov word [edi + NVM_QUEUE_ENTRY.tail], ax ; publish the new cached tail
2024-07-05 23:27:38 +02:00
mov esi, dword [esi + pcidev.io_addr]
mov word [esi + edx], ax ; ring the doorbell
pop edi esi ebx
ret
endp
2024-06-24 23:37:56 +02:00
proc is_queue_full stdcall, tail:word, head:word

; Returns eax = 1 if the circular queue is full, eax = 0 otherwise.
; The queue is full when advancing the tail by one slot would collide
; with the head, i.e. either:
;   - head == 0 and tail == NVM_ASQS (wrap-around boundary), or
;   - tail < head and head - tail == 1.
; (tail == head means the queue is empty, never full.)
        push    bx
        mov     ax, [tail]
        mov     bx, [head]
        cmp     ax, bx
        je      .not_full               ; tail == head -> empty
        test    bx, bx
        jnz     @f
        cmp     ax, NVM_ASQS
        jne     @f
        ; head == 0 and tail == NVM_ASQS: the next slot wraps onto head
        pop     bx
        xor     eax, eax
        inc     eax
        ret
@@:
        cmp     ax, bx
        jae     .not_full               ; tail ahead of head -> free slots remain
        ; BUGFIX: compute head - tail. The original computed tail - head,
        ; which can never equal 1 on this path (tail < head here, so the
        ; 16-bit subtraction wraps), making the full condition undetectable.
        sub     bx, ax
        cmp     bx, 1
        jne     .not_full
        pop     bx
        xor     eax, eax
        inc     eax
        ret
.not_full:
        pop     bx
        xor     eax, eax
        ret

endp
; Acknowledge one pending completion for 'queue', if any: advance the
; cached CQ head (wrapping at NVM_ACQS) and write the CQ head doorbell.
; In:  pci = pointer to this controller's pcidev, queue = queue index
; Out: eax = 0
; NOTE(review): despite the plural name, only a single entry is consumed
; per call - confirm that is the intended per-interrupt behavior.
; NOTE(review): the bare timestamp lines below look like blame-export
; artifacts, not fasm source - confirm against the upstream file.
proc consume_cq_entries stdcall, pci:dword, queue:dword
2024-07-05 23:27:38 +02:00
push esi edi
mov esi, [pci]
mov ecx, [queue]
shl ecx, SIZEOF_NVM_QUEUE_ENTRY ; byte offset of queue bookkeeping entry
2024-07-05 23:27:38 +02:00
mov esi, dword [esi + pcidev.queue_entries]
lea esi, [esi + ecx] ; esi -> NVM_QUEUE_ENTRY for 'queue'
movzx ecx, word [esi + NVM_QUEUE_ENTRY.head]
cmp cx, word [esi + NVM_QUEUE_ENTRY.tail]
je .end ; head == tail: nothing pending
2024-08-10 19:05:28 +02:00
inc ecx
cmp ecx, NVM_ACQS
jb @f
xor ecx, ecx ; wrap head back to 0
mov word [esi + NVM_QUEUE_ENTRY.head], cx
@@:
2024-07-05 23:27:38 +02:00
stdcall cqyhdbl_write, [pci], [queue], ecx ; also stores the new head
.end:
2024-07-05 23:27:38 +02:00
pop edi esi
xor eax, eax
ret
endp
; IRQ handler for the NVMe controllers managed by this driver.
; Out: eax = 1 if the interrupt was handled, eax = 0 otherwise.
; NOTE(review): the loop below performs no per-device interrupt-status
; check and never jumps back to .check_who_raised_irq, so only the first
; device in p_nvme_devices is ever serviced - confirm intended behavior.
; NOTE(review): the bare timestamp lines below look like blame-export
; artifacts, not fasm source - confirm against the upstream file.
proc irq_handler
2024-06-14 22:12:53 +02:00
push ebx esi edi
mov edi, dword [p_nvme_devices]
mov esi, edi
sub esi, sizeof.pcidev ; pre-decrement so the loop's add lands on element 0
mov ebx, dword [pcidevs_len]
xor ecx, ecx
.check_who_raised_irq:
add esi, sizeof.pcidev
inc ecx
cmp ecx, ebx
ja .not_our_irq ; ran past the end of the device list
2024-07-05 23:27:38 +02:00
mov edi, dword [esi + pcidev.io_addr]
mov dword [edi + NVME_MMIO.INTMS], 0x3 ; mask interrupts while servicing
2024-08-01 19:49:18 +02:00
;mov eax, dword [esi + pcidev.spinlock]
;test eax, eax
;jz @f ; not locked, so it must be an I/O command
stdcall consume_cq_entries, esi, 0 ; queue 0 (admin)
@@:
stdcall consume_cq_entries, esi, 1 ; queue 1
2024-07-05 23:27:38 +02:00
; Interrupt handled by driver, return 1
mov dword [edi + NVME_MMIO.INTMC], 0x3 ; unmask interrupts again
xor eax, eax
2024-07-30 20:38:53 +02:00
xchg eax, dword [esi + pcidev.spinlock] ; unlock spinlock
pop edi esi ebx
mov eax, 1
2024-07-05 23:27:38 +02:00
ret
2024-06-03 18:02:36 +02:00
.not_our_irq:
2024-07-05 23:27:38 +02:00
; Interrupt not handled by driver, return 0
pop edi esi ebx
2024-07-05 23:27:38 +02:00
xor eax, eax
ret
2024-05-30 22:06:50 +02:00
endp
2024-03-31 21:43:38 +02:00
proc nvme_cleanup
; Free all driver resources: per-queue CQ/SQ rings, per-command mutexes,
; I/O queues on the controller, and finally the pcidev array itself.
; NOTE(review): pcidevs_len is loaded into ebx but never compared, and
; control falls through to .free_pcidevs after the first device's queues
; are freed, so only one device is cleaned up - confirm intended.
; NOTE(review): the bare timestamp lines below look like blame-export
; artifacts, not fasm source - confirm against the upstream file.
DEBUGF DBG_INFO, "nvme_cleanup called\n"
mov esi, dword [p_nvme_devices]
test esi, esi
jnz @f
2024-07-05 23:27:38 +02:00
ret
2024-05-26 22:54:59 +02:00
@@:
mov ebx, dword [pcidevs_len]
sub esi, sizeof.pcidev ; pre-decrement so the loop's add lands on element 0
.get_pcidev:
add esi, sizeof.pcidev
; Free the queues
mov edi, dword [esi + pcidev.queue_entries]
test edi, edi
jz .free_pcidevs ; no queues allocated for this device
sub edi, sizeof.NVM_QUEUE_ENTRY
xor ecx, ecx ; ecx = queue index
.get_queue:
add edi, sizeof.NVM_QUEUE_ENTRY
; TODO: Check if I/O completion and submission queue exist
; before deleting?
push ecx
test ecx, ecx
jz @f ; we don't want to delete the admin queue
push ecx
stdcall delete_io_completion_queue, esi, ecx
pop ecx
push ecx
stdcall delete_io_submission_queue, esi, ecx
pop ecx
@@:
invoke KernelFree, dword [edi + NVM_QUEUE_ENTRY.cq_ptr]
invoke KernelFree, dword [edi + NVM_QUEUE_ENTRY.sq_ptr]
pop ecx
; Free the commands and their respective mutexes
push edi ecx
xor ecx, ecx ; ecx = command slot index
mov edi, dword [edi + NVM_QUEUE_ENTRY.cmd_ptr]
sub edi, sizeof.NVMQCMD
.get_cmd:
add edi, sizeof.NVMQCMD
push ecx
invoke KernelFree, dword [edi + NVMQCMD.mutex_ptr]
pop ecx
inc ecx
cmp ecx, SQ_ENTRIES
jne .get_cmd
pop ecx edi
inc ecx
cmp ecx, LAST_QUEUE_ID
jne .get_queue
invoke KernelFree, dword [esi + pcidev.queue_entries]
.free_pcidevs:
invoke KernelFree, dword [p_nvme_devices]
2024-07-05 23:27:38 +02:00
ret
2024-03-28 23:43:18 +01:00
2024-04-29 03:06:13 +02:00
endp
2024-03-28 23:43:18 +01:00
;all initialized data place here
; NOTE(review): the bare timestamp lines below look like blame-export
; artifacts, not fasm source - confirm against the upstream file.
align 4
2024-07-05 23:27:38 +02:00
p_nvme_devices dd 0 ; pointer to the allocated array of pcidev structures (0 = none)
pcidevs_len dd 0 ; number of NVMe controllers detected
my_service db "nvme",0 ;max 16 chars include zero
2024-07-05 23:27:38 +02:00
; Disk-function dispatch table registered with the kernel's disk subsystem;
; first dword is the size of the table, zero entries mean "not provided".
disk_functions:
dd disk_functions.end - disk_functions
dd 0 ; no close function
dd 0 ; no closemedia function
dd nvme_query_media
dd nvme_read
dd nvme_write
2024-07-05 23:27:38 +02:00
dd 0 ; no flush function
dd 0 ; use default cache size
.end:
if __DEBUG__
include_debug_strings
end if
2024-03-28 23:43:18 +01:00
align 4
data fixups
end data
2024-07-30 18:39:00 +02:00
; vim: syntax=fasm