;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ;;
;; Copyright (C) KolibriOS team 2004-2024. All rights reserved. ;;
;; Distributed under terms of the GNU General Public License ;;
;; ;;
;; GNU GENERAL PUBLIC LICENSE ;;
;; Version 2, June 1991 ;;
;; ;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
format PE DLL native
entry START
API_VERSION equ 0 ;debug
SRV_GETVERSION equ 0
__DEBUG__ = 1
__DEBUG_LEVEL__ = 1
DRIVER_VERSION = 1
DBG_INFO = 1
NULLPTR = 0
; flags for alloc_dptr
PRP1_ENTRY_ALLOCATED = 1
PRP1_LIST_ALLOCATED = 2
PRP2_ENTRY_ALLOCATED = 4
PRP2_LIST_ALLOCATED = 8
section ".flat" code readable writable executable
include "../proc32.inc"
include "../struct.inc"
include "../macros.inc"
include "../fdo.inc"
include "../pci.inc"
include "../peimport.inc"
include "nvme.inc"
include "macros.inc"
include "lib.asm"
struct DISKMEDIAINFO
flags dd ?
sectorsize dd ?
capacity dq ?
ends
proc START c, reason:dword
cmp [reason], DRV_ENTRY
jne .err
.entry:
DEBUGF DBG_INFO, "Detecting NVMe hardware...\n"
call detect_nvme
test eax, eax
jz .err
mov eax, dword [p_nvme_devices]
test eax, eax
jz .err
xor ecx, ecx
.loop:
mov ebx, dword [p_nvme_devices]
stdcall device_is_compat, ebx
test eax, eax
jz @f
stdcall nvme_init, ebx
test eax, eax
jz .err
2024-03-28 23:43:18 +01:00
;@@:
;inc ecx
;cmp ecx, dword [pcidevs_len]
;jne .loop
stdcall add_nvme_disk, [p_nvme_devices]
test eax, eax
jz .err
invoke RegService, my_service, service_proc
ret
.err:
call nvme_cleanup
xor eax, eax
ret
endp
proc service_proc stdcall, ioctl:dword
mov esi, [ioctl]
mov eax, [esi + IOCTL.io_code]
cmp eax, SRV_GETVERSION
jne .ret
mov eax, [esi + IOCTL.output]
cmp [esi + IOCTL.out_size], 4
jne .ret
mov dword [eax], API_VERSION
xor eax, eax
ret
2024-06-26 00:37:04 +02:00
.ret:
or eax, -1
ret
endp
proc add_nvme_disk stdcall, pci:dword
push esi
mov esi, [pci]
push 0 ; null terminator
push dword [esi + pcidev.nsid]
push "n"
push dword [esi + pcidev.num]
push "nvme"
mov eax, esp
invoke DiskAdd, disk_functions, eax, [esi + pcidev.nsinfo], 0
add esp, 20
test eax, eax
jz @f
DEBUGF DBG_INFO, "nvme%u: Successfully registered disk\n", [esi + pcidev.num]
xor eax, eax
inc eax
pop esi
ret
@@:
DEBUGF DBG_INFO, "nvme%u: Failed to register disk\n", [esi + pcidev.num]
xor eax, eax
pop esi
ret
endp
proc nvme_query_media stdcall, userdata:dword, info:dword
push esi edi
mov esi, [userdata]
mov edi, [info]
mov dword [edi + DISKMEDIAINFO.flags], 0
mov eax, dword [esi + NSINFO.lbads]
mov dword [edi + DISKMEDIAINFO.sectorsize], eax
mov eax, dword [esi + NSINFO.capacity]
mov dword [edi + DISKMEDIAINFO.capacity], eax
mov eax, dword [esi + NSINFO.capacity + 4]
mov dword [edi + DISKMEDIAINFO.capacity + 4], eax
xor eax, eax
pop edi esi
ret
endp
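; Builds command dword 0 (CDW0) of a submission queue entry: the command
; identifier (CID) returned by get_new_cid is placed in bits 31:16 and the
; opcode in bits 7:0; the FUSE and PSDT fields are left as 0 here.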
proc set_cdw0 stdcall, pci:dword, y:dword, opcode:byte
stdcall get_new_cid, [pci], [y]
shl eax, 16
or al, [opcode]
ret
endp
; See pages 161-205 of the NVMe 1.4 specification for reference
proc nvme_identify stdcall, pci:dword, nsid:dword, dptr:dword, cns:byte
sub esp, sizeof.SQ_ENTRY
; It's important to check that CNS is a valid value here. CNS is a 1-bit field
; in revision 1.0 and a 2-bit field in revision 1.1; using invalid values
; results in undefined behavior (see page 162 of the NVMe 1.4 spec)
if __DEBUG__
push esi
mov esi, [pci]
mov esi, dword [esi + pcidev.io_addr]
mov eax, dword [esi + NVME_MMIO.VS]
cmp eax, VS110
jne @f
cmp [cns], 11b
jbe .ok
DEBUGF DBG_INFO, "(NVMe) FATAL ERROR: INVALID CNS VALUE ON v1.1.0 CONTROLLERS\n"
jmp .err
@@:
cmp eax, VS100
jne .ok
cmp [cns], 1b
jbe .ok
DEBUGF DBG_INFO, "(NVMe) FATAL ERROR: INVALID CNS VALUE ON v1.0.0 CONTROLLERS\n"
jmp .err
.err:
jmp @b
.ok:
pop esi
end if
stdcall memset, esp, 0, sizeof.SQ_ENTRY
mov eax, [nsid]
mov dword [esp + SQ_ENTRY.nsid], eax
mov eax, [dptr]
mov dword [esp + SQ_ENTRY.dptr], eax
stdcall set_cdw0, [pci], ADMIN_QUEUE, ADM_CMD_IDENTIFY
mov dword [esp + SQ_ENTRY.cdw0], eax
mov al, [cns]
mov byte [esp + SQ_ENTRY.cdw10], al
stdcall sqytdbl_write, [pci], ADMIN_QUEUE, esp
add esp, sizeof.SQ_ENTRY
ret
endp
; See pages 348-349 of the NVMe 1.4 specification for information on creating namespaces
proc create_namespace stdcall, pci:dword, cid:word
push esi
invoke AllocPage
test eax, eax
jz .fail
invoke GetPhysAddr
stdcall nvme_identify, [pci], 0xffffffff, eax, CNS_IDNS
.fail:
pop esi
ret
endp
; Returns 1 if the given NSID is an active NSID, returns
; 0 otherwise
proc is_active_namespace stdcall, pci:dword, nsid:dword
push esi edi
invoke KernelAlloc, 0x1000
test eax, eax
jnz @f
pop edi esi
ret
@@:
mov esi, eax
invoke GetPhysAddr
stdcall nvme_identify, [pci], [nsid], eax, CNS_IDNS
xor ecx, ecx
@@:
mov eax, dword [esi + ecx * 4]
test eax, eax
jnz .is_active_nsid
inc ecx
cmp ecx, 0x1000 / 4
jne @b
.not_active_nsid:
invoke KernelFree, esi
pop edi esi
xor eax, eax
ret
.is_active_nsid:
invoke KernelFree, esi
pop edi esi
xor eax, eax
inc eax
ret
endp
; See page 248 of the NVMe 1.4 specification for reference
; Scans NSIDs 1 through [pci + pcidev.nn] and returns the first active NSID
; found in EAX, or 0 if no active namespace exists. Note that a returned NSID
; of, say, 5 does not imply that namespaces 1-4 are active as well.
proc determine_active_nsids stdcall, pci:dword
push ebx esi
mov esi, [pci]
xor ebx, ebx
xor ecx, ecx
inc ecx
.loop:
cmp ecx, dword [esi + pcidev.nn]
ja .ret
push ecx
stdcall is_active_namespace, [pci], ecx
pop ecx
test eax, eax
jz .not_active_namespace
mov ebx, ecx
jmp .ret
.not_active_namespace:
inc ecx
jmp .loop
.ret:
mov eax, ebx
pop esi ebx
ret
endp
; See page 101 of the NVMe 1.4 specification for reference
proc create_io_completion_queue stdcall, pci:dword, prp1:dword, qid:dword, ien:byte
sub esp, sizeof.SQ_ENTRY
stdcall memset, esp, 0, sizeof.SQ_ENTRY
stdcall set_cdw0, [pci], ADMIN_QUEUE, ADM_CMD_CRE_IO_COMPLETION_QUEUE
mov dword [esp + SQ_ENTRY.cdw0], eax
mov eax, [prp1]
mov dword [esp + SQ_ENTRY.dptr], eax
mov eax, sizeof.CQ_ENTRY shl 16 ; CDW10.QSIZE
or eax, [qid] ; CDW10.QID
mov dword [esp + SQ_ENTRY.cdw10], eax
movzx eax, [ien] ; CDW11.IEN
or eax, 0x1 ; CDW11.PC
; Don't set CDW11.IV since we're not using MSI-X or MSI vector
mov dword [esp + SQ_ENTRY.cdw11], eax
stdcall sqytdbl_write, [pci], ADMIN_QUEUE, esp
add esp, sizeof.SQ_ENTRY
ret
endp
; See page 103-104 of the NVMe 1.4 specification for reference
proc create_io_submission_queue stdcall, pci:dword, prp1:dword, qid:dword, cqid:word
sub esp, sizeof.SQ_ENTRY
stdcall memset, esp, 0, sizeof.SQ_ENTRY
stdcall set_cdw0, [pci], ADMIN_QUEUE, ADM_CMD_CRE_IO_SUBMISSION_QUEUE
mov dword [esp + SQ_ENTRY.cdw0], eax
mov eax, [prp1]
mov dword [esp + SQ_ENTRY.dptr], eax
mov eax, sizeof.SQ_ENTRY shl 16 ; CDW10.QSIZE
or eax, [qid]
mov dword [esp + SQ_ENTRY.cdw10], eax
movzx eax, [cqid]
shl eax, 16 ; CDW11.CQID
or eax, 0x1 ; CDW11.PC (always set this to 1 as some devices may not support non-contiguous pages)
; TODO: Set CDW10.QPRIO
mov dword [esp + SQ_ENTRY.cdw11], eax
stdcall sqytdbl_write, [pci], ADMIN_QUEUE, esp
add esp, sizeof.SQ_ENTRY
ret
endp
; See page 95-96 of the NVMe 1.4 specification for reference
proc abort stdcall, pci:dword, cid:word, sqid:word
sub esp, sizeof.SQ_ENTRY
stdcall memset, esp, 0, sizeof.SQ_ENTRY
stdcall set_cdw0, [pci], ADMIN_QUEUE, ADM_CMD_ABORT
mov dword [esp + SQ_ENTRY.cdw0], eax
movzx eax, [cid]
shl eax, 16
or ax, word [sqid]
mov dword [esp + SQ_ENTRY.cdw10], eax
stdcall sqytdbl_write, [pci], ADMIN_QUEUE, esp
add esp, sizeof.SQ_ENTRY
ret
endp
; See page 205 of the NVMe 1.4 specification for reference
proc set_features stdcall, pci:dword, dptr:dword, fid:byte, cdw11:dword
sub esp, sizeof.SQ_ENTRY
stdcall memset, esp, 0, sizeof.SQ_ENTRY
stdcall set_cdw0, [pci], ADMIN_QUEUE, ADM_CMD_SET_FEATURES
mov dword [esp + SQ_ENTRY.cdw0], eax
mov eax, [dptr]
mov dword [esp + SQ_ENTRY.dptr], eax
movzx eax, [fid]
;or eax, 1 shl 31 ; CDW10.SV
mov dword [esp + SQ_ENTRY.cdw10], eax
mov eax, [cdw11]
mov dword [esp + SQ_ENTRY.cdw11], eax
stdcall sqytdbl_write, [pci], ADMIN_QUEUE, esp
add esp, sizeof.SQ_ENTRY
ret
endp
; See page 105 of the NVMe 1.4 specification for reference
proc delete_io_completion_queue stdcall, pci:dword, qid:word
sub esp, sizeof.SQ_ENTRY
stdcall memset, esp, 0, sizeof.SQ_ENTRY
stdcall set_cdw0, [pci], ADMIN_QUEUE, ADM_CMD_DEL_IO_COMPLETION_QUEUE
mov dword [esp + SQ_ENTRY.cdw0], eax
mov ax, [qid]
mov word [esp + SQ_ENTRY.cdw10], ax
stdcall sqytdbl_write, [pci], ADMIN_QUEUE, esp
add esp, sizeof.SQ_ENTRY
ret
endp
; See page 114-116 of the NVMe 1.4 specification for reference
proc get_features stdcall, pci:dword, dptr:dword, sel:byte, fid:byte
sub esp, sizeof.SQ_ENTRY
stdcall memset, esp, 0, sizeof.SQ_ENTRY
stdcall set_cdw0, [pci], ADMIN_QUEUE, ADM_CMD_GET_FEATURES
mov dword [esp + SQ_ENTRY.cdw0], eax
movzx eax, [sel]
and eax, 111b
shl eax, 8 ; CDW10.SEL
or al, byte [fid] ; CDW10.FID
mov dword [esp + SQ_ENTRY.cdw10], eax
mov eax, [dptr]
mov dword [esp + SQ_ENTRY.dptr], eax
; TODO: Implement CDW14.UUID?
stdcall sqytdbl_write, [pci], ADMIN_QUEUE, esp
add esp, sizeof.SQ_ENTRY
ret
endp
; See page 105-106 of the NVMe 1.4 specification for reference
proc delete_io_submission_queue stdcall, pci:dword, qid:word
sub esp, sizeof.SQ_ENTRY
stdcall memset, esp, 0, sizeof.SQ_ENTRY
stdcall set_cdw0, [pci], ADMIN_QUEUE, ADM_CMD_DEL_IO_SUBMISSION_QUEUE
mov dword [esp + SQ_ENTRY.cdw0], eax
mov ax, [qid]
mov word [esp + SQ_ENTRY.cdw10], ax
stdcall sqytdbl_write, [pci], ADMIN_QUEUE, esp
add esp, sizeof.SQ_ENTRY
ret
endp
; See page 117-118 of the NVMe 1.4 specification for reference
; INCOMPLETE
proc get_log_page stdcall, pci:dword, dptr:dword, lid:byte
sub esp, sizeof.SQ_ENTRY
stdcall memset, esp, 0, sizeof.SQ_ENTRY
stdcall set_cdw0, [pci], ADMIN_QUEUE, ADM_CMD_GET_LOG_PAGE
mov dword [esp + SQ_ENTRY.cdw0], eax
mov eax, [dptr]
mov dword [esp + SQ_ENTRY.dptr], eax
add esp, sizeof.SQ_ENTRY
ret
endp
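; Allocates 'nprps' page-sized buffers plus an array holding a pointer to each
; of them, and returns the array (the in-memory backing for a future PRP list;
; still a work in progress, see the TODO in alloc_dptr).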
proc build_prp_list stdcall, nprps:dword
push ebx edi
mov ebx, [nprps]
push ebx
shl ebx, 2
invoke KernelAlloc, ebx
pop ebx
test eax, eax
jz .err
mov edi, eax
xor ecx, ecx
@@:
push ecx
invoke KernelAlloc, 0x1000
pop ecx
test eax, eax
jz .cleanup
mov dword [edi + ecx * 4], eax ; store the page pointer in the list (dword entries for now)
inc ecx
cmp ecx, ebx
jb @b
mov eax, edi
pop edi ebx
ret
.cleanup:
mov ebx, ecx ; number of pages allocated before the failure
xor ecx, ecx
jmp .cleanup_check
.cleanup_free:
push ecx
invoke KernelFree, dword [edi + ecx * 4]
pop ecx
inc ecx
.cleanup_check:
cmp ecx, ebx
jb .cleanup_free
invoke KernelFree, edi
.err:
xor eax, eax
pop edi ebx
ret
endp
; Returns a PRP/PRP List
; ----------------------------------------------------------------------------------------------------
; flags (used to identify how to free the memory used by the PRPs after the command completes):
; bit 0 set (PRP1_ENTRY_ALLOCATED): allocated a PRP1 entry
; bit 1 set (PRP1_LIST_ALLOCATED): allocated a PRP1 list
; bit 2 set (PRP2_ENTRY_ALLOCATED): allocated a PRP2 entry
; bit 3 set (PRP2_LIST_ALLOCATED): allocated a PRP2 list
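; PRP selection follows the PRP rules of the NVMe specification: PRP1 always
; points at the first (possibly offset) memory page of the transfer; if the
; data fits in at most two pages, PRP2 points directly at the second page,
; otherwise PRP2 must point at a PRP list describing the remaining pages.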
proc alloc_dptr stdcall, ns:dword, prps_ptr:dword, start_sector:qword, numsectors:dword, flags_ptr:dword
push esi edi ebx
mov esi, [ns]
mov edi, [flags_ptr]
mov ebx, dword [esi + NSINFO.pg_sectors]
mov esi, [prps_ptr]
mov dword [esi + 4], 0 ; set PRP #2 to reserved by default
cmp ebx, dword [edi]
ja .numsectors_over_1pg
.alloc_prp1_entry:
invoke KernelAlloc, 0x1000
test eax, eax
jz .err
or byte [edi], PRP1_ENTRY_ALLOCATED
mov dword [esi], eax
pop ebx edi esi
ret
.numsectors_over_1pg:
; check if offset portion of PRP1 is 0
mov eax, dword [esi]
and eax, (PAGE_SIZE shl NVM_MPS) - 1
test eax, eax
jz .check_prp2_cond_bii
; check PRP2 condition b) i.
cmp ebx, [numsectors]
jne .check_prp2_cond_cici
invoke KernelAlloc, 0x1000
test eax, eax
jz .err
mov dword [esi + 4], eax
or byte [edi], PRP2_ENTRY_ALLOCATED
jmp .alloc_prp1_entry
.check_prp2_cond_bii:
mov eax, ebx
shl eax, 1
cmp eax, [numsectors]
ja .check_prp2_cond_c
invoke KernelAlloc, 0x1000
test eax, eax
jz .err
mov dword [esi + 4], eax
or byte [edi], PRP2_ENTRY_ALLOCATED
jmp .alloc_prp1_entry
.check_prp2_cond_c:
; is the command data transfer length greater than or equal to two memory pages?
mov eax, ebx
shl eax, 1
cmp eax, [numsectors]
jb .alloc_prp1_entry
stdcall build_prp_list, 4 ; TODO: replace 4 with proper number of PRPs later
test eax, eax
jz .err
mov dword [esi + 4], eax
jmp .alloc_prp1_entry
.err:
xor eax, eax
pop ebx edi esi
ret
endp
proc nvme_read ns:dword, dst:dword, start_sector:qword, numsectors_ptr:dword
push ebx esi edi
sub esp, 8
stdcall alloc_dptr, [ns], dword [esp], dword [esp + 4], [start_sector], [numsectors_ptr]
test eax, eax
jz .end
mov edi, eax
invoke GetPhysAddr
mov esi, [ns]
mov ebx, [numsectors_ptr]
mov ebx, [ebx]
stdcall nvme_io_rw, [esi + NSINFO.pci], \ ; PCI device
1, \ ; QID (1 for now)
[esi + NSINFO.nsid], \ ; NSID
eax, \ ; PRP1
0, \ ; PRP2 (no PRP list yet)
dword [start_sector], \ ; SLBA_LO
dword [start_sector + 4], \ ; SLBA_HI
ebx, \ ; NLB
NVM_CMD_READ ; Command opcode
; assume command completes successfully for now
mov ecx, dword [esi + NSINFO.pg_sectors]
cmp ebx, ecx
ja .is_prp_list
; only 1-2 pages are used, which makes our life easier
mov ecx, dword [esi + NSINFO.lbads]
imul ecx, ebx
mov esi, edi
mov edi, [dst]
rep movsd
add esp, 8
pop edi esi ebx
xor eax, eax ; TODO: add proper return value later
ret
.is_prp_list:
.end:
add esp, 8
pop edi esi ebx
xor eax, eax
ret
endp
; See page 258-261 (read) and 269-271 (write) of the NVMe 1.4 specification for reference
proc nvme_io_rw stdcall, pci:dword, qid:word, nsid:dword, prp1:dword, prp2:dword, slba_lo:dword, slba_hi:dword, nlb:word, opcode:dword
; TODO: Use IDENTC.NOIOB to construct read/write commands that don't
; cross the I/O boundary to achieve optimal performance
;
; Read AWUN/NAWUN
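; For NVM read/write commands, CDW10/CDW11 hold the low/high dwords of the
; 64-bit starting LBA and CDW12 bits 15:0 hold the number of logical blocks,
; which the specification defines as a 0-based value.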
sub esp, sizeof.SQ_ENTRY
stdcall memset, esp, 0, sizeof.SQ_ENTRY
movzx ecx, [qid]
stdcall set_cdw0, [pci], ecx, [opcode]
mov dword [esp + SQ_ENTRY.cdw0], eax ; CDW0
mov eax, [prp1]
mov dword [esp + SQ_ENTRY.dptr], eax
mov eax, [prp2]
mov dword [esp + SQ_ENTRY.dptr + 8], eax
mov eax, [nsid]
mov dword [esp + SQ_ENTRY.nsid], eax
mov eax, [slba_lo]
mov dword [esp + SQ_ENTRY.cdw10], eax
mov eax, [slba_hi]
mov dword [esp + SQ_ENTRY.cdw11], eax
movzx eax, [nlb]
mov word [esp + SQ_ENTRY.cdw12], ax
movzx ecx, [qid]
stdcall sqytdbl_write, [pci], ecx, esp
add esp, sizeof.SQ_ENTRY
ret
endp
proc detect_nvme
invoke GetPCIList
mov edx, eax
.check_dev:
mov ebx, dword [eax + PCIDEV.class]
and ebx, 0x00ffff00 ; retrieve class/subclass code only
cmp ebx, 0x00010800 ; Mass Storage Controller - Non-Volatile Memory Controller
je .found_dev
.next_dev:
mov eax, dword [eax + PCIDEV.fd]
cmp eax, edx
jne .check_dev
jmp .exit_success
.found_dev:
push edx eax
PDEBUGF DBG_INFO, "PCI(%u.%u.%u): Detected NVMe device...\n", byte [eax + PCIDEV.bus], byte [eax + PCIDEV.devfn]
cmp dword [pcidevs_len], TOTAL_PCIDEVS
jne @f
pop eax edx
jmp .exit_success
@@:
inc dword [pcidevs_len]
mov ebx, dword [p_nvme_devices]
test ebx, ebx
jnz @f
invoke KernelAlloc, sizeof.pcidev
test eax, eax
2024-04-28 02:11:57 +02:00
jz .err_no_mem
mov dword [p_nvme_devices], eax
DEBUGF DBG_INFO, "(NVMe) Allocated pcidev struct at 0x%x\n", [p_nvme_devices]
@@:
mov ecx, dword [pcidevs_len]
dec ecx
pop eax
mov ebx, dword [p_nvme_devices]
movzx edx, byte [eax + PCIDEV.bus]
mov byte [ebx + pcidev.bus], dl
movzx edx, byte [eax + PCIDEV.devfn]
mov byte [ebx + pcidev.devfn], dl
mov dword [ebx + pcidev.num], ecx
pop edx
jmp .next_dev
.err_no_mem:
pop eax edx
xor eax, eax
ret
.exit_success:
xor eax, eax
inc eax
ret
endp
proc device_is_compat stdcall, pci:dword
push esi edx ecx
mov esi, [pci]
invoke PciRead8, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], PCI_header00.interrupt_line
mov byte [esi + pcidev.iline], al
invoke PciRead32, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], PCI_header00.base_addr_0
and eax, 0xfffffff0
test eax, eax
jz .failure
mov edx, eax
push edx
invoke MapIoMem, eax, sizeof.NVME_MMIO, PG_SW+PG_NOCACHE
test eax, eax
jz .failure
;DEBUGF DBG_INFO, "(NVMe) MMIO allocated at: 0x%x\n", eax
mov dword [esi + pcidev.io_addr], eax
mov eax, dword [eax + NVME_MMIO.CAP + 4]
and eax, CAP_DSTRD
mov byte [esi + pcidev.dstrd], al
; 1003h + ((2y + 1) * (4 << CAP.DSTRD))
mov eax, 4
mov cl, byte [esi + pcidev.dstrd]
shl ax, cl
mov ecx, NVM_ASQS
shl ecx, 1
inc ecx
imul ecx, eax
add ecx, 0x1003
pop edx
invoke MapIoMem, edx, ecx, PG_SW+PG_NOCACHE
mov dword [esi + pcidev.io_addr], eax
mov eax, dword [eax + NVME_MMIO.VS]
DEBUGF DBG_INFO, "nvme%u: Controller version: 0x%x\n", [esi + pcidev.num], eax
mov dword [esi + pcidev.version], eax
pop ecx edx esi
xor eax, eax
inc eax
ret
.failure:
PDEBUGF DBG_INFO, "PCI(%u.%u.%u): something went wrong checking NVMe device compatibility\n", byte [esi + pcidev.bus], byte [esi + pcidev.devfn]
pop ecx edx esi
xor eax, eax
ret
endp
; nvme_init: Initializes the NVMe controller
proc nvme_init stdcall, pci:dword
push ebx esi edi
mov esi, dword [pci]
mov edi, dword [esi + pcidev.io_addr]
if 0
mov eax, dword [edi + NVME_MMIO.CAP]
DEBUGF DBG_INFO, "(NVMe) CAP (0-31): 0x%x\n", eax
mov eax, dword [edi + NVME_MMIO.CAP + 4]
DEBUGF DBG_INFO, "(NVMe) CAP (32-63): 0x%x\n", eax
mov eax, dword [edi + NVME_MMIO.CC]
DEBUGF DBG_INFO, "(NVMe) CC: 0x%x\n", eax
mov eax, dword [edi + NVME_MMIO.CSTS]
DEBUGF DBG_INFO, "(NVMe) CSTS: 0x%x\n", eax
end if
; check maximum queue entries supported
mov ax, word [edi + NVME_MMIO.CAP]
cmp ax, SQ_ENTRIES
jb .exit_fail
; For some reason, bit 7 (no I/O command set supported) is also set to 1 despite bit 0 (NVM command set)
; being set to 1, so it is unclear whether bit 7 should be checked at all - investigate later.
mov eax, dword [edi + NVME_MMIO.CAP + 4]
test eax, CAP_CSS_NVM_CMDSET
jz .exit_fail
; Reset controller before we configure it
stdcall nvme_controller_reset, esi
if __DEBUG__
stdcall nvme_wait, edi
end if
mov eax, dword [edi + NVME_MMIO.CAP + 4]
and eax, CAP_MPSMIN
shr eax, 16
cmp eax, NVM_MPS
ja .exit_fail
mov eax, dword [edi + NVME_MMIO.CAP + 4]
and eax, CAP_MPSMAX
shr eax, 20
cmp eax, NVM_MPS
jb .exit_fail
; Configure IOSQES, IOCQES, AMS, MPS, CSS
and dword [edi + NVME_MMIO.CC], not (CC_AMS or CC_MPS or CC_CSS or CC_IOSQES or CC_IOCQES)
mov eax, dword [edi + NVME_MMIO.CC]
; CSS = 0 (NVM Command Set)
; AMS = 0 (Round Robin)
; MPS = 0 (4KiB Pages)
; IOSQES = 6 (64B)
; IOCQES = 4 (16B)
or eax, (4 shl 20) or (6 shl 16)
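; i.e. eax |= 0x00460000 (IOCQES = 4 in CC bits 23:20, IOSQES = 6 in CC bits 19:16)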
mov dword [edi + NVME_MMIO.CC], eax
; Configure Admin Queue Attributes
mov eax, dword [edi + NVME_MMIO.AQA]
and eax, not (AQA_ASQS or AQA_ACQS)
or eax, NVM_ASQS or (NVM_ACQS shl 16)
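; AQA.ASQS (bits 11:0) and AQA.ACQS (bits 27:16) are 0-based queue sizes, so
; NVM_ASQS/NVM_ACQS are assumed to already be the entry count minus one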
mov dword [edi + NVME_MMIO.AQA], eax
; Allocate list of queues
invoke KernelAlloc, sizeof.NVM_QUEUE_ENTRY * (LAST_QUEUE_ID + 1)
test eax, eax
jz .exit_fail
mov dword [esi + pcidev.queue_entries], eax
mov edi, eax
stdcall memset, eax, 0, sizeof.NVM_QUEUE_ENTRY * (LAST_QUEUE_ID + 1)
; Allocate submission/completion queue pointers
xor ecx, ecx
@@:
push ecx
invoke CreateRingBuffer, 0x1000, PG_SW
pop ecx
test eax, eax
jz .exit_fail
mov dword [edi + ecx + NVM_QUEUE_ENTRY.sq_ptr], eax
push ecx
stdcall memset, eax, 0, sizeof.SQ_ENTRY * SQ_ENTRIES
invoke CreateRingBuffer, 0x1000, PG_SW
pop ecx
test eax, eax
jz .exit_fail
mov dword [edi + ecx + NVM_QUEUE_ENTRY.cq_ptr], eax
push ecx
stdcall memset, eax, 0, sizeof.CQ_ENTRY * CQ_ENTRIES
pop ecx
mov dword [edi + ecx + NVM_QUEUE_ENTRY.phase_tag], CQ_PHASE_TAG
add ecx, sizeof.NVM_QUEUE_ENTRY
cmp ecx, (LAST_QUEUE_ID + 1) * sizeof.NVM_QUEUE_ENTRY
jne @b
; Configure Admin Submission/Completion Queue Base Address
mov esi, [pci]
mov esi, dword [esi + pcidev.io_addr]
mov eax, dword [edi + NVM_QUEUE_ENTRY.sq_ptr]
invoke GetPhysAddr
mov dword [esi + NVME_MMIO.ASQ], eax
mov dword [esi + NVME_MMIO.ASQ + 4], 0
mov eax, dword [edi + NVM_QUEUE_ENTRY.cq_ptr]
invoke GetPhysAddr
mov dword [esi + NVME_MMIO.ACQ], eax
mov dword [esi + NVME_MMIO.ACQ + 4], 0
; Attach interrupt handler
mov esi, [pci]
movzx eax, byte [esi + pcidev.iline]
DEBUGF DBG_INFO, "nvme%u: Attaching interrupt handler to IRQ %u\n", [esi + pcidev.num], eax
invoke AttachIntHandler, eax, irq_handler, 0
test eax, eax
jz .exit_fail
DEBUGF DBG_INFO, "nvme%u: Successfully attached interrupt handler\n", [esi + pcidev.num]
; Restart the controller
stdcall nvme_controller_start, esi
invoke KernelAlloc, 0x1000
test eax, eax
jz .exit_fail
mov edi, eax
invoke GetPhysAddr
; pci:dword, nsid:dword, dptr:dword, cns:byte
stdcall nvme_identify, [pci], 0, eax, CNS_IDCS
mov eax, dword [edi + IDENTC.nn]
mov dword [esi + pcidev.nn], eax
DEBUGF DBG_INFO, "nvme%u: Namespace Count: %u\n", [esi + pcidev.num], eax
lea ebx, byte [edi + IDENTC.sn]
lea eax, byte [esi + pcidev.serial]
stdcall memcpy, eax, ebx, 20
DEBUGF DBG_INFO, "nvme%u: Serial Number: %s\n", [esi + pcidev.num], eax
add ebx, 20
lea eax, byte [esi + pcidev.model]
stdcall memcpy, eax, ebx, 40
DEBUGF DBG_INFO, "nvme%u: Model: %s\n", [esi + pcidev.num], eax
mov edx, dword [esi + pcidev.version]
cmp edx, VS140
jb @f
; This is a reserved field in pre-1.4 controllers
mov al, byte [edi + IDENTC.cntrltype]
cmp al, CNTRLTYPE_IO_CONTROLLER
jne .exit_fail
DEBUGF DBG_INFO, "nvme%u: I/O controller detected...\n", [esi + pcidev.num]
@@:
mov al, byte [edi + IDENTC.sqes]
and al, 11110000b
cmp al, 0x60 ; maximum submission queue entry size must be at least 64 bytes
jb .exit_fail
mov al, byte [edi + IDENTC.cqes]
and al, 11110000b
cmp al, 0x40 ; maximum completion queue entry size must be at least 16 bytes
jb .exit_fail
invoke KernelFree, edi
mov eax, 1 or (1 shl 16) ; CDW11 (set the number of queues we want)
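; NSQR (bits 15:0) and NCQR (bits 31:16) are 0-based, so a value of 1 in each
; field actually requests two I/O submission and two I/O completion queues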
stdcall set_features, [pci], NULLPTR, FID_NUMBER_OF_QUEUES, eax
mov esi, [pci]
mov esi, dword [esi + pcidev.queue_entries]
mov esi, dword [esi + NVM_QUEUE_ENTRY.cq_ptr]
mov eax, dword [esi + sizeof.CQ_ENTRY + CQ_ENTRY.cdw0]
if __DEBUG__
DEBUGF DBG_INFO, "nvme%u: Set Features CDW0: 0x%x\n", [esi + pcidev.num], eax
end if
test ax, ax ; Number of I/O Submission Queues allocated
jz .exit_fail
shl eax, 16
test ax, ax ; Number of I/O Completion Queues allocated
jnz .exit_fail
; Create I/O Queues
; TODO: create N queue pairs for N CPU cores, see page 8 of the NVMe 1.4 spec for an explanation
mov esi, [pci]
mov edi, esi
mov esi, dword [esi + pcidev.queue_entries]
lea esi, [esi + sizeof.NVM_QUEUE_ENTRY]
mov eax, dword [esi + NVM_QUEUE_ENTRY.cq_ptr]
invoke GetPhysAddr
stdcall create_io_completion_queue, [pci], eax, 1, IEN_ON
DEBUGF DBG_INFO, "nvme%u: Successfully created I/O completion queue 1\n", [edi + pcidev.num]
mov eax, dword [esi + NVM_QUEUE_ENTRY.sq_ptr]
invoke GetPhysAddr
stdcall create_io_submission_queue, [pci], eax, 1, 1
DEBUGF DBG_INFO, "nvme%u: Successfully created I/O submission queue 1\n", [edi + pcidev.num]
if 1
stdcall determine_active_nsids, [pci]
test eax, eax
jz .exit_fail ; No active NSIDS
mov esi, [pci]
mov dword [esi + pcidev.nsid], eax
DEBUGF DBG_INFO, "nvme%u: Found active NSID: %u\n", [esi + pcidev.num], eax
else
mov esi, [pci]
xor eax, eax
inc eax
mov dword [esi + pcidev.nsid], eax
end if
invoke KernelAlloc, 0x1000
test eax, eax
jz .exit_fail
mov edi, eax
invoke GetPhysAddr
stdcall nvme_identify, [pci], [esi + pcidev.nsid], eax, CNS_IDNS
invoke KernelAlloc, sizeof.NSINFO
test eax, eax
jz .exit_fail
mov ebx, eax
mov dword [esi + pcidev.nsinfo], eax
mov al, byte [edi + IDENTN.nsfeat]
mov byte [ebx + NSINFO.features], al
DEBUGF DBG_INFO, "nvme%un%u: Namespace Features: 0x%x\n", [esi + pcidev.num], [esi + pcidev.nsid], al
mov dword [ebx + NSINFO.pci], esi
mov eax, dword [edi + IDENTN.nsze]
mov dword [ebx + NSINFO.size], eax
mov eax, dword [edi + IDENTN.nsze + 4]
mov dword [ebx + NSINFO.size + 4], eax
mov eax, dword [edi + IDENTN.ncap]
mov dword [ebx + NSINFO.capacity], eax
mov eax, dword [edi + IDENTN.ncap + 4]
mov dword [ebx + NSINFO.capacity + 4], eax
DEBUGF DBG_INFO, "nvme%un%u: Namespace Size: %u + %u logical blocks\n", [esi + pcidev.num], [esi + pcidev.nsid], [edi + IDENTN.nsze], [edi + IDENTN.nsze + 4]
DEBUGF DBG_INFO, "nvme%un%u: Namespace Capacity: %u + %u logical blocks\n", [esi + pcidev.num], [esi + pcidev.nsid], [edi + IDENTN.ncap], [edi + IDENTN.ncap + 4]
mov eax, dword [edi + IDENTN.lbaf0]
shr eax, 16 ; Get LBADS
and eax, 0xff
stdcall pow2, eax
mov dword [ebx + NSINFO.lbads], eax
DEBUGF DBG_INFO, "nvme%un%u: Namespace LBA Data Size: %u\n", [esi + pcidev.num], [esi + pcidev.nsid], eax
invoke KernelFree, edi
invoke KernelAlloc, 0x1000
test eax, eax
jz .exit_fail
mov edi, eax
;stdcall memcpy, eax, test_string, 50
invoke GetPhysAddr
stdcall nvme_io_rw, [pci], 1, [esi + pcidev.nsid], eax, 0, 0, 0, 1, NVM_CMD_READ
DEBUGF DBG_INFO, "%s\n", edi
DEBUGF DBG_INFO, "nvme%u: Successfully initialized driver\n", [esi + pcidev.num]
xor eax, eax
inc eax
pop edi esi ebx
ret
.exit_fail:
if __DEBUG__
mov esi, [pci]
DEBUGF DBG_INFO, "nvme%u: failed to initialize controller\n", [esi + pcidev.num]
end if
xor eax, eax
pop edi esi ebx
ret
endp
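; Returns a free command identifier (CID) for queue 'y'. The current tail of
; that submission queue is reused as the CID, so every slot in the queue maps
; to a unique identifier.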
proc get_new_cid stdcall, pci:dword, y:dword
push esi
mov esi, [pci]
mov esi, [esi + pcidev.queue_entries]
mov ecx, [y]
imul ecx, sizeof.NVM_QUEUE_ENTRY
movzx eax, word [esi + ecx + NVM_QUEUE_ENTRY.tail]
pop esi
ret
endp
proc nvme_controller_reset stdcall, pci:dword
push esi edi
mov esi, [pci]
DEBUGF DBG_INFO, "nvme%u: Resetting Controller...\n", [esi + pcidev.num]
mov edi, dword [esi + pcidev.io_addr]
and dword [edi + NVME_MMIO.CC], 0xfffffffe ; CC.EN = 0
; Wait for controller to be brought to idle state, CSTS.RDY should be cleared to 0 when this happens
.wait:
test dword [edi + NVME_MMIO.CSTS], CSTS_RDY
jnz .wait
DEBUGF DBG_INFO, "nvme%u: Successfully reset controller...\n", [esi + pcidev.num]
pop edi esi
2024-05-24 01:15:34 +02:00
ret
endp
proc nvme_controller_start stdcall, pci:dword
push esi edi
mov esi, [pci]
DEBUGF DBG_INFO, "nvme%u: Starting Controller...\n", [esi + pcidev.num]
mov edi, dword [esi + pcidev.io_addr]
or dword [edi + NVME_MMIO.CC], 1 ; CC.EN = 1
; Wait for controller to be brought into active state, CSTS.RDY should be set to 1 when this happens
.wait:
test dword [edi + NVME_MMIO.CSTS], CSTS_RDY
jz .wait
DEBUGF DBG_INFO, "nvme%u: Successfully started controller...\n", [esi + pcidev.num]
pop edi esi
ret
endp
; Should be called only after the value of CC.EN has changed
proc nvme_wait stdcall, mmio:dword
push esi
mov esi, [mmio]
mov esi, dword [esi + NVME_MMIO.CAP]
and esi, CAP_TO
shr esi, 24
imul esi, 150 ; TODO: bad time delay, set to appropriate value later
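; CAP.TO is expressed in 500 ms units, so the worst-case wait after toggling
; CC.EN is TO * 500 ms; the factor above is only a rough placeholder and the
; unit expected by Sleep still has to be confirmed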
invoke Sleep
pop esi
ret
endp
; Writes to completion queue 'y' head doorbell
proc cqyhdbl_write stdcall, pci:dword, y:dword, cqh:dword
push esi edi
mov esi, [pci]
; 1000h + ((2y + 1) * (4 << CAP.DSTRD))
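; e.g. with CAP.DSTRD = 0 and y = 1, the CQ head doorbell sits at offset
; 1000h + ((2 * 1 + 1) * 4) = 100Ch from the start of the controller registers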
mov eax, [y]
shl al, 1
inc al
mov edx, 4
mov cl, byte [esi + pcidev.dstrd]
shl dx, cl
imul dx, ax
add dx, 0x1000
mov ecx, [y]
imul ecx, sizeof.NVM_QUEUE_ENTRY
mov edi, dword [esi + pcidev.queue_entries]
lea edi, dword [edi + ecx]
mov esi, dword [esi + pcidev.io_addr]
mov eax, [cqh]
;DEBUGF DBG_INFO, "(NVMe) Writing to completion queue doorbell register 0x%x: %u\n", dx, ax
mov word [esi + edx], ax ; Write to CQyHDBL
mov word [edi + NVM_QUEUE_ENTRY.head], ax
pop edi esi
ret
endp
; Writes to submission queue 'y' tail doorbell
proc sqytdbl_write stdcall, pci:dword, y:word, cmd:dword
push ebx esi edi
mov edi, [pci]
mov edi, dword [edi + pcidev.queue_entries]
movzx ecx, [y]
imul ecx, sizeof.NVM_QUEUE_ENTRY
mov edi, dword [edi + ecx + NVM_QUEUE_ENTRY.sq_ptr]
mov esi, [cmd]
mov ecx, dword [esi + SQ_ENTRY.cdw0]
shr ecx, 16 ; Get CID
imul ecx, sizeof.SQ_ENTRY
lea edi, [edi + ecx]
stdcall memcpy, edi, esi, sizeof.SQ_ENTRY
mov edi, [pci]
mov esi, dword [edi + pcidev.io_addr]
mov edi, dword [edi + pcidev.queue_entries]
movzx ecx, [y]
imul ecx, sizeof.NVM_QUEUE_ENTRY
movzx eax, word [edi + ecx + NVM_QUEUE_ENTRY.tail]
cmp ax, NVM_ASQS
jb @f
xor ax, ax
@@:
mov esi, [pci]
inc ax
; 1000h + (2y * (4 << CAP.DSTRD))
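; e.g. with CAP.DSTRD = 0 and y = 1, the SQ tail doorbell sits at offset
; 1000h + ((2 * 1) * 4) = 1008h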
movzx ebx, [y]
shl ebx, 1
mov edx, 4
mov cl, byte [esi + pcidev.dstrd]
shl edx, cl
imul edx, ebx
add edx, 0x1000
mov esi, dword [esi + pcidev.io_addr]
mov word [esi + edx], ax
movzx ecx, [y]
imul ecx, sizeof.NVM_QUEUE_ENTRY
mov word [edi + ecx + NVM_QUEUE_ENTRY.tail], ax
dec ax
movzx ecx, [y]
stdcall nvme_cmd_wait, [pci], ecx, eax
pop edi esi ebx
ret
endp
; Calculates 2^x
proc pow2 stdcall, x:byte
push ecx
mov cl, [x]
xor eax, eax
inc eax
test cl, cl
jnz @f
pop ecx
ret
@@:
shl eax, cl
pop ecx
ret
endp
proc nvme_cmd_wait stdcall, pci:dword, y:dword, cid:word
push esi
mov esi, [pci]
movzx ecx, word [cid]
mov edx, [y]
imul edx, sizeof.NVM_QUEUE_ENTRY
mov esi, dword [esi + pcidev.queue_entries]
lea esi, [esi + edx]
imul ecx, sizeof.CQ_ENTRY
mov eax, dword [esi + NVM_QUEUE_ENTRY.phase_tag]
mov esi, dword [esi + NVM_QUEUE_ENTRY.cq_ptr]
test eax, CQ_PHASE_TAG
jnz .phase_tag_1
@@:
test byte [esi + ecx + CQ_ENTRY.status], CQ_PHASE_TAG
jnz @b
pop esi
ret
.phase_tag_1:
;DEBUGF DBG_INFO, "status: %x\n", [esi + ecx + CQ_ENTRY.status]
test byte [esi + ecx + CQ_ENTRY.status], CQ_PHASE_TAG
jz .phase_tag_1
pop esi
ret
endp
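; Returns 1 if a queue with the given tail and head indices is full, 0
; otherwise (a circular queue is full when advancing the tail by one entry
; would make it catch up with the head).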
proc is_queue_full stdcall, tail:word, head:word
push bx
mov ax, [tail]
mov bx, [head]
cmp ax, bx
je .not_full
test bx, bx
jnz @f
cmp ax, NVM_ASQS
jne @f
pop bx
xor eax, eax
inc eax
ret
@@:
cmp ax, bx
jae .not_full
sub ax, bx
cmp ax, 1
jne .not_full
pop bx
xor eax, eax
inc eax
ret
.not_full:
pop bx
xor eax, eax
ret
endp
proc consume_cq_entries stdcall, pci:dword, queue:dword
push esi edi
mov esi, [pci]
mov ecx, [queue]
imul ecx, sizeof.NVM_QUEUE_ENTRY
mov esi, dword [esi + pcidev.queue_entries]
lea esi, [esi + ecx]
mov edi, dword [esi + NVM_QUEUE_ENTRY.cq_ptr]
movzx eax, word [esi + NVM_QUEUE_ENTRY.tail]
movzx ecx, word [esi + NVM_QUEUE_ENTRY.head]
stdcall is_queue_full, eax, ecx
test eax, eax
jnz .end
movzx ecx, word [esi + NVM_QUEUE_ENTRY.head]
.loop:
cmp cx, word [esi + NVM_QUEUE_ENTRY.tail]
je .end
mov edx, ecx
imul edx, sizeof.CQ_ENTRY
mov ax, word [edi + edx + CQ_ENTRY.status]
DEBUGF DBG_INFO, "Status: 0x%x\n", ax
inc cx
push ecx
stdcall cqyhdbl_write, [pci], [queue], ecx
pop ecx
jmp .loop
.end:
pop edi esi
xor eax, eax
ret
endp
proc irq_handler
push esi edi
mov esi, dword [p_nvme_devices]
; check if the NVMe device generated an interrupt
invoke PciRead16, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], PCI_header00.status
test al, 1000b ; check interrupt status
jz .not_our_irq
mov edi, dword [esi + pcidev.io_addr]
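; Mask both interrupt vectors (INTMS bits 1:0) while the completion queues are
; drained; the mask is cleared again via INTMC before returning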
mov dword [edi + NVME_MMIO.INTMS], 0x3
xor ecx, ecx
@@:
push ecx
stdcall consume_cq_entries, [p_nvme_devices], ecx
pop ecx
inc ecx
cmp ecx, LAST_QUEUE_ID
jng @b
; Interrupt handled by driver, return 1
mov dword [edi + NVME_MMIO.INTMC], 0x3
pop edi esi
xor eax, eax
inc eax
ret
.not_our_irq:
; Interrupt not handled by driver, return 0
pop edi esi
xor eax, eax
ret
endp
proc nvme_cleanup
DEBUGF DBG_INFO, "(NVMe): Cleaning up...\n"
mov ecx, dword [pcidevs_len]
mov eax, dword [p_nvme_devices]
test eax, eax
jnz .loop
ret
.loop:
;invoke KernelFree, dword [p_nvme_devices + ecx * sizeof.pcidev + pcidev.ident_ptr]
dec ecx
test ecx, ecx
jnz .loop
invoke KernelFree, dword [p_nvme_devices]
@@:
ret
endp
;all initialized data is placed here
align 4
p_nvme_devices dd 0
pcidevs_len dd 0
my_service db "NVMe",0 ;max 16 chars including zero
disk_functions:
dd disk_functions.end - disk_functions
dd 0 ; no close function
dd 0 ; no closemedia function
dd nvme_query_media
dd 0 ; no read function (for now)
dd 0 ; no write function (for now)
dd 0 ; no flush function
dd 0 ; use default cache size
.end:
if __DEBUG__
test_string db "NVMe driver successfully wrote to disk! :D",0
include_debug_strings
end if
align 4
data fixups
end data