EXT4 works:
# mkfs.ext4 /dev/sys/logs
mke2fs 1.46.5 (30-Dec-2021)
/dev/sys/logs contains a xfs file system labelled 'logs'
Proceed anyway? (y,N) y
Discarding device blocks: done
Creating filesystem with 1310720 4k blocks and 327680 inodes
Filesystem UUID: 18846019-26a4-4681-a45b-cbc0307f45a6
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736
Allocating group tables: done
Writing inode tables: done
Creating journal (16384 blocks): done
Writing superblocks and filesystem accounting information: done
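(mkfs completing only proves that the small writes it issues go through. To rule out the filesystem layer entirely, a rough check along these lines, with /mnt/logs as an arbitrary mount point, pushes 128 KiB direct writes through ext4, the same payload size the NVMe driver complains about further down.)

# Rough check that larger writes through ext4 actually reach the device.
mkdir -p /mnt/logs
mount /dev/sys/logs /mnt/logs
dd if=/dev/zero of=/mnt/logs/testfile bs=128k count=1024 oflag=direct
sync
umount /mnt/logs
fsck.ext4 -n -f /dev/sys/logs   # read-only check, should come back clean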
XFS fails:
# mkfs.xfs /dev/sys/logs
mkfs.xfs: /dev/sys/logs appears to contain an existing filesystem (ext4).
mkfs.xfs: Use the -f option to force overwrite.
[root@live:~]# mkfs.xfs -f /dev/sys/logs
meta-data=/dev/sys/logs          isize=512    agcount=4, agsize=327680 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=0 inobtcount=0
data     =                       bsize=4096   blocks=1310720, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
Discarding blocks...Done.
mkfs.xfs: pwrite failed: Input/output error
libxfs_bwrite: write failed on (unknown) bno 0x9fff00/0x100, err=5
mkfs.xfs: Releasing dirty buffer to free list!
found dirty buffer (bulk) on free list!
mkfs.xfs: pwrite failed: Input/output error
libxfs_bwrite: write failed on (unknown) bno 0x0/0x100, err=5
mkfs.xfs: Releasing dirty buffer to free list!
found dirty buffer (bulk) on free list!
Metadata CRC error detected at 0x55922fbad0, xfs_cntbt block 0x780010/0x1000
mkfs.xfs: pwrite failed: Input/output error
libxfs_bwrite: write failed on xfs_inode bno 0xa0/0x20, err=5
mkfs.xfs: pwrite failed: Input/output error
libxfs_bwrite: write failed on xfs_inode bno 0x80/0x20, err=5
mkfs.xfs: Releasing dirty buffer to free list!
mkfs.xfs: Releasing dirty buffer to free list!
mkfs.xfs: Lost a write to the data device!
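The err=5 (EIO) pwrite failures suggest the problem sits below XFS. A quick, destructive check that takes mkfs.xfs out of the picture entirely is a single 128 KiB O_DIRECT write straight to the LV; it may or may not hit the same code path, since that depends on how the request is split into segments:

# Destroys whatever is on /dev/sys/logs; 131072 bytes matches the
# "Invalid SGL for payload:131072" warning below.
dd if=/dev/zero of=/dev/sys/logs bs=128k count=1 oflag=direct conv=fsync
echo "dd exit status: $?"

If this also fails with an I/O error, the problem is independent of XFS and lives in the block/NVMe layer.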
The failed mkfs.xfs run triggers this at the kernel level:
[ 163.106143] sg[0] phys_addr:0x000000020ae06200 offset:512 length:105984 dma_address:0x000000009401e000 dma_length:105984
[ 163.106152] sg[1] phys_addr:0x000000020aca0000 offset:0 length:25088 dma_address:0x0000000094038000 dma_length:25088
[ 163.106158] ------------[ cut here ]------------
[ 163.106168] WARNING: CPU: 3 PID: 229 at drivers/nvme/host/pci.c:700 nvme_queue_rq+0x930/0xae0
[ 163.106171] Invalid SGL for payload:131072 nents:2
[ 163.106174] Modules linked in: nft_compat nft_counter nft_chain_nat nf_tables overlay optee_armtz optee adc_keypad dolby_fw exfat gpio_keypad leds_state meson_clk_debug meson_cpufreq meson_gxl meson_ir reg_access audio_data audiodsp snd_soc_dummy_codec snd_soc_aml_t9015 snd_soc_aml_codec_tl1_acodec snd_usb_audio snd_hwdep snd_usbmidi_lib amvenc_multi jpegenc media_sync amvdec_av1_v4l amvdec_av1 amvdec_mavs amvdec_avs2 amvdec_vp9_v4l amvdec_vp9 amvdec_vc1 amvdec_mmpeg4_v4l amvdec_mmpeg4 amvdec_mmpeg12_v4l amvdec_mmpeg12 amvdec_mmjpeg_v4l amvdec_mmjpeg amvdec_h265_v4l amvdec_h265 amvdec_h264mvc amvdec_mh264_v4l amvdec_mh264 amvdec_ports zram stream_input decoder_common firmware media_clock aml_drm video_framerate_adapter snd_soc iv009_isp aml_media dhd iv009_isp_sensor iv009_isp_lens meson_gxbb_wdt mali_kbase aml_spicc snd_soc_notify meson_clk_measure iv009_isp_iq aml_rng aml_dvb_extern blocklayoutdriver aml_crypto_dma hifidsp reboot aml_gki_tool meson_saradc nfsv4 dns_resolver nfs nfs_acl nfsd auth_rpcgss lockd grace sunrpc xfs btrfs xor xor_neon raid6_pq
[ 163.106238] CPU: 3 PID: 229 Comm: kworker/3:1H Not tainted 5.4.180 #1.2
[ 163.106241] Hardware name: Khadas VIM4 (DT)
[ 163.106249] Workqueue: kblockd blk_mq_run_work_fn
[ 163.106253] Call trace:
[ 163.106260] [ffffffc02022b9a0+ 96][<ffffffd510091d20>] dump_backtrace+0x0/0x110
[ 163.106264] [ffffffc02022ba00+ 32][<ffffffd510092268>] show_stack+0x28/0x34
[ 163.106269] [ffffffc02022ba20+ 64][<ffffffd510e69004>] dump_stack+0xc4/0xec
[ 163.106275] [ffffffc02022ba60+ 112][<ffffffd5100bb920>] __warn+0x120/0x130
[ 163.106279] [ffffffc02022bad0+ 144][<ffffffd510e53454>] warn_slowpath_fmt+0x98/0xac
[ 163.106283] [ffffffc02022bb60+ 256][<ffffffd5107b2830>] nvme_queue_rq+0x930/0xae0
[ 163.106287] [ffffffc02022bc60+ 160][<ffffffd510581f6c>] blk_mq_dispatch_rq_list+0x178/0x61c
[ 163.106291] [ffffffc02022bd00+ 80][<ffffffd51058798c>] blk_mq_sched_dispatch_requests+0x17c/0x190
[ 163.106294] [ffffffc02022bd50+ 32][<ffffffd51057f544>] __blk_mq_run_hw_queue+0xa4/0x150
[ 163.106298] [ffffffc02022bd70+ 48][<ffffffd51057f61c>] blk_mq_run_work_fn+0x2c/0x40
[ 163.106302] [ffffffc02022bda0+ 80][<ffffffd5100df4e8>] process_one_work+0x1c8/0x580
[ 163.106305] [ffffffc02022bdf0+ 112][<ffffffd5100df91c>] worker_thread+0x7c/0x510
[ 163.106311] [ffffffc02022be60+ 0][<ffffffd5100e6ff4>] kthread+0x164/0x170
[ 163.106315] [0000000000000000+ 0][<ffffffd510087bb4>] ret_from_fork+0x10/0x18
[ 163.106318] ---[ end trace bdf3439c0fefce9e ]---
[ 163.106325] blk_update_request: I/O error, dev nvme0n1, sector 1520439040 op 0x1:(WRITE) flags 0x8800 phys_seg 2 prio class 0
[ 163.107367] blk_update_request: I/O error, dev nvme0n1, sector 1509953536 op 0x1:(WRITE) flags 0x8800 phys_seg 2 prio class 0
[ 163.135908] blk_update_request: I/O error, dev nvme0n1, sector 1509953696 op 0x1:(WRITE) flags 0x8800 phys_seg 4 prio class 0
[ 163.136912] blk_update_request: I/O error, dev nvme0n1, sector 1509953664 op 0x1:(WRITE) flags 0x8800 phys_seg 3 prio class 0
These errors seem related to this kernel bug.
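For reference, the warning at drivers/nvme/host/pci.c:700 is the driver refusing a DMA-mapped scatter-gather list that it cannot express as PRP entries, after which the request is failed with EIO (the blk_update_request lines above). One thing worth comparing is the queue limits the block layer advertises for nvme0n1 and for the LVM device stacked on top of it; a rough sketch (virt_boundary_mask is only exported by newer kernels, hence the existence check):

# Dump the request-queue limits for the NVMe disk and the device-mapper
# devices (the LVM volumes) stacked on top of it.
for q in /sys/block/nvme0n1/queue /sys/block/dm-*/queue; do
    [ -d "$q" ] || continue
    echo "== $q =="
    for f in logical_block_size physical_block_size max_segments \
             max_segment_size max_hw_sectors_kb max_sectors_kb; do
        printf '%-22s %s\n' "$f" "$(cat "$q/$f")"
    done
    # virt_boundary_mask only exists on newer kernels; skip it if absent.
    [ -f "$q/virt_boundary_mask" ] && \
        printf '%-22s %s\n' virt_boundary_mask "$(cat "$q/virt_boundary_mask")"
done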